aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/DocBook/mac80211.tmpl6
-rw-r--r--Documentation/devices.txt6
-rw-r--r--Documentation/feature-removal-schedule.txt37
-rw-r--r--Documentation/scsi/osd.txt198
-rw-r--r--MAINTAINERS20
-rw-r--r--arch/alpha/kernel/entry.S3
-rw-r--r--arch/alpha/kernel/osf_sys.c2
-rw-r--r--arch/ia64/ia32/ia32_entry.S2
-rw-r--r--arch/ia64/kernel/perfmon.c2
-rw-r--r--arch/mips/kernel/linux32.c34
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/parisc/kernel/syscall_table.S2
-rw-r--r--arch/powerpc/include/asm/systbl.h2
-rw-r--r--arch/s390/kernel/compat_wrapper.S2
-rw-r--r--arch/sparc/kernel/smp_64.c4
-rw-r--r--arch/sparc/kernel/systbls_64.S2
-rw-r--r--arch/sparc/kernel/time_64.c4
-rw-r--r--arch/um/drivers/net_kern.c39
-rw-r--r--arch/um/include/shared/net_kern.h2
-rw-r--r--arch/x86/ia32/ia32entry.S2
-rw-r--r--arch/x86/ia32/sys_ia32.c22
-rw-r--r--arch/x86/include/asm/ia32.h7
-rw-r--r--arch/x86/include/asm/sys_ia32.h2
-rw-r--r--block/cmd-filter.c1
-rw-r--r--drivers/char/agp/intel-agp.c21
-rw-r--r--drivers/firewire/fw-card.c149
-rw-r--r--drivers/firewire/fw-cdev.c1044
-rw-r--r--drivers/firewire/fw-device.c203
-rw-r--r--drivers/firewire/fw-device.h23
-rw-r--r--drivers/firewire/fw-iso.c227
-rw-r--r--drivers/firewire/fw-ohci.c260
-rw-r--r--drivers/firewire/fw-sbp2.c57
-rw-r--r--drivers/firewire/fw-topology.c29
-rw-r--r--drivers/firewire/fw-topology.h19
-rw-r--r--drivers/firewire/fw-transaction.c185
-rw-r--r--drivers/firewire/fw-transaction.h138
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/drm_debugfs.c235
-rw-r--r--drivers/gpu/drm/drm_drv.c12
-rw-r--r--drivers/gpu/drm/drm_info.c328
-rw-r--r--drivers/gpu/drm/drm_proc.c721
-rw-r--r--drivers/gpu/drm/drm_stub.c15
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c116
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h21
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c898
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debugfs.c257
-rw-r--r--drivers/gpu/drm/i915/i915_gem_proc.c334
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c31
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h22
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h12
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c66
-rw-r--r--drivers/gpu/drm/i915/intel_display.c406
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c2
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c148
-rw-r--r--drivers/ieee1394/csr.c8
-rw-r--r--drivers/ieee1394/dv1394.c2
-rw-r--r--drivers/ieee1394/eth1394.c4
-rw-r--r--drivers/ieee1394/highlevel.c2
-rw-r--r--drivers/ieee1394/nodemgr.c4
-rw-r--r--drivers/ieee1394/nodemgr.h2
-rw-r--r--drivers/ieee1394/raw1394.c14
-rw-r--r--drivers/ieee1394/sbp2.c9
-rw-r--r--drivers/ieee1394/video1394.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c2
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c17
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c2
-rw-r--r--drivers/media/dvb/firewire/firedtv-avc.c10
-rw-r--r--drivers/mtd/mtdsuper.c7
-rw-r--r--drivers/net/3c503.c3
-rw-r--r--drivers/net/Kconfig8
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/ac3200.c22
-rw-r--r--drivers/net/appletalk/cops.c45
-rw-r--r--drivers/net/appletalk/ltpc.c38
-rw-r--r--drivers/net/at1700.c19
-rw-r--r--drivers/net/benet/be_main.c28
-rw-r--r--drivers/net/cs89x0.c28
-rw-r--r--drivers/net/cxgb3/adapter.h4
-rw-r--r--drivers/net/cxgb3/common.h4
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c12
-rw-r--r--drivers/net/cxgb3/sge.c164
-rw-r--r--drivers/net/cxgb3/t3_hw.c80
-rw-r--r--drivers/net/depca.c19
-rw-r--r--drivers/net/eepro.c17
-rw-r--r--drivers/net/eexpress.c17
-rw-r--r--drivers/net/eth16i.c18
-rw-r--r--drivers/net/ethoc.c1112
-rw-r--r--drivers/net/ewrk3.c19
-rw-r--r--drivers/net/gianfar.c48
-rw-r--r--drivers/net/ibmlana.c17
-rw-r--r--drivers/net/irda/donauboe.c12
-rw-r--r--drivers/net/lance.c19
-rw-r--r--drivers/net/lp486e.c17
-rw-r--r--drivers/net/ni52.c21
-rw-r--r--drivers/net/ni65.c75
-rw-r--r--drivers/net/seeq8005.c17
-rw-r--r--drivers/net/smc-ultra.c5
-rw-r--r--drivers/net/smc-ultra32.c23
-rw-r--r--drivers/net/smc9194.c17
-rw-r--r--drivers/net/smsc911x.c5
-rw-r--r--drivers/net/tokenring/madgemc.c11
-rw-r--r--drivers/net/tokenring/proteon.c9
-rw-r--r--drivers/net/tokenring/skisa.c9
-rw-r--r--drivers/net/tokenring/smctr.c16
-rw-r--r--drivers/net/ucc_geth.c21
-rw-r--r--drivers/net/wan/sdla.c36
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/Makefile1
-rw-r--r--drivers/net/wireless/ar9170/Kconfig17
-rw-r--r--drivers/net/wireless/ar9170/Makefile3
-rw-r--r--drivers/net/wireless/ar9170/ar9170.h209
-rw-r--r--drivers/net/wireless/ar9170/cmd.c129
-rw-r--r--drivers/net/wireless/ar9170/cmd.h91
-rw-r--r--drivers/net/wireless/ar9170/eeprom.h179
-rw-r--r--drivers/net/wireless/ar9170/hw.h417
-rw-r--r--drivers/net/wireless/ar9170/led.c171
-rw-r--r--drivers/net/wireless/ar9170/mac.c452
-rw-r--r--drivers/net/wireless/ar9170/main.c1671
-rw-r--r--drivers/net/wireless/ar9170/phy.c1240
-rw-r--r--drivers/net/wireless/ar9170/usb.c748
-rw-r--r--drivers/net/wireless/ar9170/usb.h74
-rw-r--r--drivers/net/wireless/arlan-main.c21
-rw-r--r--drivers/net/wireless/ath5k/ath5k.h35
-rw-r--r--drivers/net/wireless/ath5k/attach.c2
-rw-r--r--drivers/net/wireless/ath5k/base.c46
-rw-r--r--drivers/net/wireless/ath5k/base.h2
-rw-r--r--drivers/net/wireless/ath5k/desc.c4
-rw-r--r--drivers/net/wireless/ath5k/eeprom.c774
-rw-r--r--drivers/net/wireless/ath5k/eeprom.h128
-rw-r--r--drivers/net/wireless/ath5k/initvals.c4
-rw-r--r--drivers/net/wireless/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath5k/phy.c1170
-rw-r--r--drivers/net/wireless/ath5k/reg.h19
-rw-r--r--drivers/net/wireless/ath5k/reset.c35
-rw-r--r--drivers/net/wireless/ath9k/ahb.c2
-rw-r--r--drivers/net/wireless/ath9k/ani.c2
-rw-r--r--drivers/net/wireless/ath9k/ani.h2
-rw-r--r--drivers/net/wireless/ath9k/ath9k.h9
-rw-r--r--drivers/net/wireless/ath9k/beacon.c56
-rw-r--r--drivers/net/wireless/ath9k/calib.c2
-rw-r--r--drivers/net/wireless/ath9k/calib.h2
-rw-r--r--drivers/net/wireless/ath9k/debug.c2
-rw-r--r--drivers/net/wireless/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath9k/eeprom.c308
-rw-r--r--drivers/net/wireless/ath9k/eeprom.h5
-rw-r--r--drivers/net/wireless/ath9k/hw.c14
-rw-r--r--drivers/net/wireless/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath9k/initvals.h2
-rw-r--r--drivers/net/wireless/ath9k/mac.c2
-rw-r--r--drivers/net/wireless/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath9k/main.c47
-rw-r--r--drivers/net/wireless/ath9k/pci.c20
-rw-r--r--drivers/net/wireless/ath9k/phy.c2
-rw-r--r--drivers/net/wireless/ath9k/phy.h2
-rw-r--r--drivers/net/wireless/ath9k/rc.c23
-rw-r--r--drivers/net/wireless/ath9k/rc.h2
-rw-r--r--drivers/net/wireless/ath9k/recv.c9
-rw-r--r--drivers/net/wireless/ath9k/reg.h2
-rw-r--r--drivers/net/wireless/ath9k/regd.c2
-rw-r--r--drivers/net/wireless/ath9k/regd.h2
-rw-r--r--drivers/net/wireless/ath9k/regd_common.h2
-rw-r--r--drivers/net/wireless/ath9k/xmit.c78
-rw-r--r--drivers/net/wireless/b43/main.c2
-rw-r--r--drivers/net/wireless/b43/xmit.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c179
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c54
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h52
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c38
-rw-r--r--drivers/net/wireless/libertas/radiotap.h10
-rw-r--r--drivers/net/wireless/libertas/rx.c12
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c4
-rw-r--r--drivers/net/wireless/p54/Kconfig39
-rw-r--r--drivers/net/wireless/p54/p54common.c16
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/wavelan.c79
-rw-r--r--drivers/net/wireless/wavelan.p.h9
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c24
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c24
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c188
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h3
-rw-r--r--drivers/s390/scsi/zfcp_def.h17
-rw-r--r--drivers/s390/scsi/zfcp_erp.c290
-rw-r--r--drivers/s390/scsi/zfcp_ext.h73
-rw-r--r--drivers/s390/scsi/zfcp_fc.c82
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c240
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h4
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c47
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c250
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c20
-rw-r--r--drivers/scsi/3w-9xxx.c101
-rw-r--r--drivers/scsi/3w-9xxx.h2
-rw-r--r--drivers/scsi/Kconfig18
-rw-r--r--drivers/scsi/Makefile3
-rw-r--r--drivers/scsi/ch.c1
-rw-r--r--drivers/scsi/constants.c13
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_ddp.c5
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_ddp.h8
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_iscsi.c27
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c3
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.h1
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_pdu.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c14
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c49
-rw-r--r--drivers/scsi/fcoe/fcoe_sw.c67
-rw-r--r--drivers/scsi/fcoe/libfcoe.c16
-rw-r--r--drivers/scsi/hosts.c3
-rw-r--r--drivers/scsi/hptiop.c3
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c62
-rw-r--r--drivers/scsi/ipr.c13
-rw-r--r--drivers/scsi/ipr.h4
-rw-r--r--drivers/scsi/ips.c3
-rw-r--r--drivers/scsi/iscsi_tcp.c73
-rw-r--r--drivers/scsi/libfc/fc_exch.c6
-rw-r--r--drivers/scsi/libfc/fc_fcp.c75
-rw-r--r--drivers/scsi/libfc/fc_lport.c5
-rw-r--r--drivers/scsi/libfc/fc_rport.c2
-rw-r--r--drivers/scsi/libiscsi.c236
-rw-r--r--drivers/scsi/libiscsi_tcp.c122
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c6
-rw-r--r--drivers/scsi/mpt2sas/Kconfig66
-rw-r--r--drivers/scsi/mpt2sas/Makefile7
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h1067
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h2151
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_init.h420
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h1295
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_raid.h295
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_sas.h282
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h249
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_type.h61
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c3435
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h779
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_config.c1873
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c2516
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.h416
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_debug.h181
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c5687
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c1211
-rw-r--r--drivers/scsi/osd/Kbuild45
-rw-r--r--drivers/scsi/osd/Kconfig53
-rwxr-xr-xdrivers/scsi/osd/Makefile37
-rw-r--r--drivers/scsi/osd/osd_debug.h30
-rw-r--r--drivers/scsi/osd/osd_initiator.c1657
-rw-r--r--drivers/scsi/osd/osd_uld.c487
-rw-r--r--drivers/scsi/osst.c96
-rw-r--r--drivers/scsi/osst.h2
-rw-r--r--drivers/scsi/scsi.c104
-rw-r--r--drivers/scsi/scsi_debug.c443
-rw-r--r--drivers/scsi/scsi_error.c34
-rw-r--r--drivers/scsi/scsi_lib.c203
-rw-r--r--drivers/scsi/scsi_scan.c1
-rw-r--r--drivers/scsi/scsi_sysfs.c1
-rw-r--r--drivers/scsi/scsi_transport_fc.c2
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c45
-rw-r--r--drivers/scsi/sd.c349
-rw-r--r--drivers/scsi/sd.h1
-rw-r--r--drivers/scsi/ses.c33
-rw-r--r--drivers/scsi/sg.c495
-rw-r--r--drivers/scsi/st.c6
-rw-r--r--drivers/scsi/stex.c107
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c137
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c64
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.h4
-rw-r--r--drivers/ssb/Kconfig16
-rw-r--r--drivers/ssb/b43_pci_bridge.c1
-rw-r--r--drivers/usb/storage/transport.c2
-rw-r--r--fs/9p/v9fs_vfs.h4
-rw-r--r--fs/9p/vfs_dentry.c4
-rw-r--r--fs/9p/vfs_super.c5
-rw-r--r--fs/Kconfig56
-rw-r--r--fs/Makefile6
-rw-r--r--fs/adfs/adfs.h2
-rw-r--r--fs/adfs/dir.c2
-rw-r--r--fs/affs/affs.h3
-rw-r--r--fs/affs/amigaffs.c8
-rw-r--r--fs/affs/namei.c4
-rw-r--r--fs/afs/dir.c2
-rw-r--r--fs/anon_inodes.c2
-rw-r--r--fs/attr.c3
-rw-r--r--fs/autofs/root.c2
-rw-r--r--fs/autofs4/inode.c2
-rw-r--r--fs/autofs4/root.c4
-rw-r--r--fs/block_dev.c146
-rw-r--r--fs/buffer.c145
-rw-r--r--fs/cifs/cifsfs.c3
-rw-r--r--fs/cifs/cifsfs.h4
-rw-r--r--fs/cifs/dir.c4
-rw-r--r--fs/coda/dir.c2
-rw-r--r--fs/compat.c28
-rw-r--r--fs/configfs/dir.c2
-rw-r--r--fs/dcache.c48
-rw-r--r--fs/devpts/inode.c188
-rw-r--r--fs/dlm/dir.c18
-rw-r--r--fs/dlm/dlm_internal.h2
-rw-r--r--fs/dlm/lock.c60
-rw-r--r--fs/dlm/lockspace.c2
-rw-r--r--fs/dlm/lowcomms.c181
-rw-r--r--fs/dlm/user.c24
-rw-r--r--fs/drop_caches.c2
-rw-r--r--fs/ecryptfs/dentry.c2
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h2
-rw-r--r--fs/ext2/balloc.c8
-rw-r--r--fs/ext2/ialloc.c10
-rw-r--r--fs/ext2/inode.c2
-rw-r--r--fs/ext2/super.c1
-rw-r--r--fs/ext2/xattr.c8
-rw-r--r--fs/ext3/balloc.c8
-rw-r--r--fs/ext3/ialloc.c12
-rw-r--r--fs/ext3/inode.c6
-rw-r--r--fs/ext3/namei.c6
-rw-r--r--fs/ext3/super.c48
-rw-r--r--fs/ext3/xattr.c6
-rw-r--r--fs/ext4/balloc.c2
-rw-r--r--fs/ext4/ext4.h2
-rw-r--r--fs/ext4/ialloc.c12
-rw-r--r--fs/ext4/inode.c40
-rw-r--r--fs/ext4/mballoc.c46
-rw-r--r--fs/ext4/namei.c6
-rw-r--r--fs/ext4/super.c54
-rw-r--r--fs/ext4/xattr.c6
-rw-r--r--fs/fat/namei_msdos.c2
-rw-r--r--fs/fat/namei_vfat.c4
-rw-r--r--fs/fuse/dir.c2
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/gfs2/ops_dentry.c2
-rw-r--r--fs/gfs2/super.h2
-rw-r--r--fs/hfs/hfs_fs.h2
-rw-r--r--fs/hfs/sysdep.c2
-rw-r--r--fs/hfsplus/hfsplus_fs.h2
-rw-r--r--fs/hfsplus/inode.c2
-rw-r--r--fs/hostfs/hostfs_kern.c4
-rw-r--r--fs/hpfs/dentry.c2
-rw-r--r--fs/inode.c6
-rw-r--r--fs/isofs/inode.c2
-rw-r--r--fs/jfs/acl.c2
-rw-r--r--fs/jfs/inode.c6
-rw-r--r--fs/jfs/jfs_dtree.c18
-rw-r--r--fs/jfs/jfs_extent.c10
-rw-r--r--fs/jfs/jfs_inode.c4
-rw-r--r--fs/jfs/jfs_inode.h2
-rw-r--r--fs/jfs/jfs_xtree.c14
-rw-r--r--fs/jfs/namei.c10
-rw-r--r--fs/jfs/xattr.c12
-rw-r--r--fs/libfs.c5
-rw-r--r--fs/namei.c48
-rw-r--r--fs/namespace.c3
-rw-r--r--fs/ncpfs/dir.c4
-rw-r--r--fs/nfs/dir.c4
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfsd/vfs.c4
-rw-r--r--fs/notify/inotify/inotify.c16
-rw-r--r--fs/ocfs2/dcache.c2
-rw-r--r--fs/ocfs2/dcache.h2
-rw-r--r--fs/open.c2
-rw-r--r--fs/pipe.c7
-rw-r--r--fs/proc/base.c6
-rw-r--r--fs/proc/generic.c2
-rw-r--r--fs/proc/proc_sysctl.c4
-rw-r--r--fs/proc/root.c3
-rw-r--r--fs/quota/Kconfig59
-rw-r--r--fs/quota/Makefile14
-rw-r--r--fs/quota/dquot.c (renamed from fs/dquot.c)572
-rw-r--r--fs/quota/quota.c (renamed from fs/quota.c)37
-rw-r--r--fs/quota/quota_tree.c (renamed from fs/quota_tree.c)132
-rw-r--r--fs/quota/quota_tree.h (renamed from fs/quota_tree.h)0
-rw-r--r--fs/quota/quota_v1.c (renamed from fs/quota_v1.c)48
-rw-r--r--fs/quota/quota_v2.c (renamed from fs/quota_v2.c)3
-rw-r--r--fs/quota/quotaio_v1.h (renamed from fs/quotaio_v1.h)0
-rw-r--r--fs/quota/quotaio_v2.h (renamed from fs/quotaio_v2.h)0
-rw-r--r--fs/ramfs/file-nommu.c6
-rw-r--r--fs/reiserfs/bitmap.c14
-rw-r--r--fs/reiserfs/inode.c10
-rw-r--r--fs/reiserfs/namei.c6
-rw-r--r--fs/reiserfs/stree.c14
-rw-r--r--fs/reiserfs/super.c60
-rw-r--r--fs/reiserfs/xattr.c2
-rw-r--r--fs/smbfs/dir.c4
-rw-r--r--fs/super.c17
-rw-r--r--fs/sync.c2
-rw-r--r--fs/sysfs/dir.c2
-rw-r--r--fs/sysv/namei.c2
-rw-r--r--fs/sysv/sysv.h2
-rw-r--r--fs/ubifs/super.c3
-rw-r--r--fs/udf/balloc.c14
-rw-r--r--fs/udf/ialloc.c8
-rw-r--r--fs/ufs/balloc.c12
-rw-r--r--fs/ufs/ialloc.c8
-rw-r--r--fs/ufs/inode.c39
-rw-r--r--fs/ufs/namei.c2
-rw-r--r--fs/ufs/super.c11
-rw-r--r--fs/ufs/ufs.h2
-rw-r--r--include/drm/drmP.h77
-rw-r--r--include/drm/drm_pciids.h2
-rw-r--r--include/linux/bsg.h2
-rw-r--r--include/linux/buffer_head.h7
-rw-r--r--include/linux/compat.h8
-rw-r--r--include/linux/dcache.h2
-rw-r--r--include/linux/firewire-cdev.h218
-rw-r--r--include/linux/fs.h220
-rw-r--r--include/linux/ieee80211.h17
-rw-r--r--include/linux/if_ether.h1
-rw-r--r--include/linux/if_frad.h1
-rw-r--r--include/linux/major.h1
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/ncp_fs.h2
-rw-r--r--include/linux/netdevice.h19
-rw-r--r--include/linux/netfilter/x_tables.h23
-rw-r--r--include/linux/nfs_fs.h2
-rw-r--r--include/linux/nfs_xdr.h2
-rw-r--r--include/linux/nl80211.h88
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/quota.h11
-rw-r--r--include/linux/quotaops.h119
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/net/cfg80211.h203
-rw-r--r--include/net/ethoc.h22
-rw-r--r--include/net/ieee80211_radiotap.h4
-rw-r--r--include/net/mac80211.h84
-rw-r--r--include/net/netfilter/nf_conntrack.h14
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h2
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h7
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h7
-rw-r--r--include/net/netfilter/nf_conntrack_tuple.h6
-rw-r--r--include/net/netlink.h1
-rw-r--r--include/net/netns/conntrack.h5
-rw-r--r--include/scsi/fc/fc_fcoe.h7
-rw-r--r--include/scsi/fc_frame.h19
-rw-r--r--include/scsi/libfc.h31
-rw-r--r--include/scsi/libfcoe.h18
-rw-r--r--include/scsi/libiscsi.h19
-rw-r--r--include/scsi/osd_attributes.h327
-rw-r--r--include/scsi/osd_initiator.h433
-rw-r--r--include/scsi/osd_protocol.h579
-rw-r--r--include/scsi/osd_sec.h45
-rw-r--r--include/scsi/osd_sense.h260
-rw-r--r--include/scsi/osd_types.h40
-rw-r--r--include/scsi/scsi.h31
-rw-r--r--include/scsi/scsi_cmnd.h15
-rw-r--r--include/scsi/scsi_device.h10
-rw-r--r--include/scsi/scsi_transport_iscsi.h4
-rw-r--r--kernel/cgroup.c5
-rw-r--r--lib/nlattr.c27
-rw-r--r--net/appletalk/ddp.c6
-rw-r--r--net/ax25/af_ax25.c12
-rw-r--r--net/core/dev.c13
-rw-r--r--net/ipv4/netfilter/arp_tables.c18
-rw-r--r--net/ipv4/netfilter/ip_tables.c27
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c6
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c63
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c2
-rw-r--r--net/ipv6/ip6_input.c4
-rw-r--r--net/ipv6/netfilter/ip6_tables.c27
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c6
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c6
-rw-r--r--net/mac80211/agg-rx.c8
-rw-r--r--net/mac80211/agg-tx.c232
-rw-r--r--net/mac80211/cfg.c245
-rw-r--r--net/mac80211/debugfs.c24
-rw-r--r--net/mac80211/ibss.c3
-rw-r--r--net/mac80211/ieee80211_i.h78
-rw-r--r--net/mac80211/iface.c14
-rw-r--r--net/mac80211/main.c25
-rw-r--r--net/mac80211/mlme.c259
-rw-r--r--net/mac80211/pm.c78
-rw-r--r--net/mac80211/rate.c6
-rw-r--r--net/mac80211/rate.h4
-rw-r--r--net/mac80211/rx.c29
-rw-r--r--net/mac80211/scan.c77
-rw-r--r--net/mac80211/sta_info.c17
-rw-r--r--net/mac80211/sta_info.h7
-rw-r--r--net/mac80211/tx.c579
-rw-r--r--net/mac80211/util.c126
-rw-r--r--net/mac80211/wep.c21
-rw-r--r--net/mac80211/wext.c33
-rw-r--r--net/mac80211/wpa.c28
-rw-r--r--net/netfilter/Kconfig2
-rw-r--r--net/netfilter/nf_conntrack_core.c129
-rw-r--r--net/netfilter/nf_conntrack_expect.c2
-rw-r--r--net/netfilter/nf_conntrack_helper.c8
-rw-r--r--net/netfilter/nf_conntrack_netlink.c94
-rw-r--r--net/netfilter/nf_conntrack_proto.c16
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c9
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c1
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c10
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c15
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_udplite.c1
-rw-r--r--net/netfilter/nf_conntrack_standalone.c57
-rw-r--r--net/netfilter/xt_connlimit.c6
-rw-r--r--net/netfilter/xt_physdev.c21
-rw-r--r--net/netrom/af_netrom.c17
-rw-r--r--net/rose/af_rose.c4
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/rpc_pipe.c2
-rw-r--r--net/wireless/Kconfig50
-rw-r--r--net/wireless/Makefile3
-rw-r--r--net/wireless/core.c30
-rw-r--r--net/wireless/core.h5
-rw-r--r--net/wireless/mlme.c46
-rw-r--r--net/wireless/nl80211.c824
-rw-r--r--net/wireless/nl80211.h38
-rw-r--r--net/wireless/reg.c65
-rw-r--r--net/wireless/scan.c27
-rw-r--r--net/wireless/wext-compat.c11
-rw-r--r--net/x25/af_x25.c6
-rw-r--r--net/xfrm/xfrm_state.c2
523 files changed, 46988 insertions, 8275 deletions
diff --git a/Documentation/DocBook/mac80211.tmpl b/Documentation/DocBook/mac80211.tmpl
index 8af6d9626878..fbeaffc1dcc3 100644
--- a/Documentation/DocBook/mac80211.tmpl
+++ b/Documentation/DocBook/mac80211.tmpl
@@ -227,6 +227,12 @@ usage should require reading the full document.
227!Pinclude/net/mac80211.h Powersave support 227!Pinclude/net/mac80211.h Powersave support
228 </chapter> 228 </chapter>
229 229
230 <chapter id="beacon-filter">
231 <title>Beacon filter support</title>
232!Pinclude/net/mac80211.h Beacon filter support
233!Finclude/net/mac80211.h ieee80211_beacon_loss
234 </chapter>
235
230 <chapter id="qos"> 236 <chapter id="qos">
231 <title>Multiple queues and QoS support</title> 237 <title>Multiple queues and QoS support</title>
232 <para>TBD</para> 238 <para>TBD</para>
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 2be08240ee80..62254d4510c6 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -3145,6 +3145,12 @@ Your cooperation is appreciated.
3145 1 = /dev/blockrom1 Second ROM card's translation layer interface 3145 1 = /dev/blockrom1 Second ROM card's translation layer interface
3146 ... 3146 ...
3147 3147
3148260 char OSD (Object-based-device) SCSI Device
3149 0 = /dev/osd0 First OSD Device
3150 1 = /dev/osd1 Second OSD Device
3151 ...
3152 255 = /dev/osd255 256th OSD Device
3153
3148 **** ADDITIONAL /dev DIRECTORY ENTRIES 3154 **** ADDITIONAL /dev DIRECTORY ENTRIES
3149 3155
3150This section details additional entries that should or may exist in 3156This section details additional entries that should or may exist in
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index e47c0ff8ba7a..02ea3773535e 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -6,20 +6,47 @@ be removed from this file.
6 6
7--------------------------- 7---------------------------
8 8
9What: old static regulatory information and ieee80211_regdom module parameter 9What: The ieee80211_regdom module parameter
10When: 2.6.29 10When: March 2010 / desktop catchup
11
12Why: This was inherited by the CONFIG_WIRELESS_OLD_REGULATORY code,
13 and currently serves as an option for users to define an
14 ISO / IEC 3166 alpha2 code for the country they are currently
15 present in. Although there are userspace API replacements for this
16 through nl80211 distributions haven't yet caught up with implementing
17 decent alternatives through standard GUIs. Although available as an
18 option through iw or wpa_supplicant its just a matter of time before
19 distributions pick up good GUI options for this. The ideal solution
20 would actually consist of intelligent designs which would do this for
21 the user automatically even when travelling through different countries.
22 Until then we leave this module parameter as a compromise.
23
24 When userspace improves with reasonable widely-available alternatives for
25 this we will no longer need this module parameter. This entry hopes that
26 by the super-futuristically looking date of "March 2010" we will have
27 such replacements widely available.
28
29Who: Luis R. Rodriguez <lrodriguez@atheros.com>
30
31---------------------------
32
33What: CONFIG_WIRELESS_OLD_REGULATORY - old static regulatory information
34When: March 2010 / desktop catchup
35
11Why: The old regulatory infrastructure has been replaced with a new one 36Why: The old regulatory infrastructure has been replaced with a new one
12 which does not require statically defined regulatory domains. We do 37 which does not require statically defined regulatory domains. We do
13 not want to keep static regulatory domains in the kernel due to the 38 not want to keep static regulatory domains in the kernel due to the
14 the dynamic nature of regulatory law and localization. We kept around 39 the dynamic nature of regulatory law and localization. We kept around
15 the old static definitions for the regulatory domains of: 40 the old static definitions for the regulatory domains of:
41
16 * US 42 * US
17 * JP 43 * JP
18 * EU 44 * EU
45
19 and used by default the US when CONFIG_WIRELESS_OLD_REGULATORY was 46 and used by default the US when CONFIG_WIRELESS_OLD_REGULATORY was
20 set. We also kept around the ieee80211_regdom module parameter in case 47 set. We will remove this option once the standard Linux desktop catches
21 some applications were relying on it. Changing regulatory domains 48 up with the new userspace APIs we have implemented.
22 can now be done instead by using nl80211, as is done with iw. 49
23Who: Luis R. Rodriguez <lrodriguez@atheros.com> 50Who: Luis R. Rodriguez <lrodriguez@atheros.com>
24 51
25--------------------------- 52---------------------------
diff --git a/Documentation/scsi/osd.txt b/Documentation/scsi/osd.txt
new file mode 100644
index 000000000000..da162f7fd5f5
--- /dev/null
+++ b/Documentation/scsi/osd.txt
@@ -0,0 +1,198 @@
1The OSD Standard
2================
3OSD (Object-Based Storage Device) is a T10 SCSI command set that is designed
4to provide efficient operation of input/output logical units that manage the
5allocation, placement, and accessing of variable-size data-storage containers,
6called objects. Objects are intended to contain operating system and application
7constructs. Each object has associated attributes attached to it, which are
8integral part of the object and provide metadata about the object. The standard
9defines some common obligatory attributes, but user attributes can be added as
10needed.
11
12See: http://www.t10.org/ftp/t10/drafts/osd2/ for the latest draft for OSD 2
13or search the web for "OSD SCSI"
14
15OSD in the Linux Kernel
16=======================
17osd-initiator:
18 The main component of OSD in Kernel is the osd-initiator library. Its main
19user is intended to be the pNFS-over-objects layout driver, which uses objects
20as its back-end data storage. Other clients are the other osd parts listed below.
21
22osd-uld:
23 This is a SCSI ULD that registers for OSD type devices and provides a testing
24platform, both for the in-kernel initiator as well as connected targets. It
25currently has no useful user-mode API, though it could have if need be.
26
27exofs:
28 Is an OSD based Linux file system. It uses the osd-initiator and osd-uld,
29to export a usable file system for users.
30See Documentation/filesystems/exofs.txt for more details
31
32osd target:
33 There are no current plans for an OSD target implementation in kernel. For all
34needs, a user-mode target that is based on the scsi tgt target framework is
35available from Ohio Supercomputer Center (OSC) at:
36http://www.open-osd.org/bin/view/Main/OscOsdProject
37There are several other target implementations. See http://open-osd.org for more
38links.
39
40Files and Folders
41=================
42This is the complete list of files included in this work:
43include/scsi/
44 osd_initiator.h Main API for the initiator library
45 osd_types.h Common OSD types
46 osd_sec.h Security Manager API
47 osd_protocol.h Wire definitions of the OSD standard protocol
48 osd_attributes.h Wire definitions of OSD attributes
49
50drivers/scsi/osd/
51 osd_initiator.c OSD-Initiator library implementation
52 osd_uld.c The OSD scsi ULD
53 osd_ktest.{h,c} In-kernel test suite (called by osd_uld)
54 osd_debug.h Some printk macros
55 Makefile For both in-tree and out-of-tree compilation
56 Kconfig Enables inclusion of the different pieces
57 osd_test.c User-mode application to call the kernel tests
58
59The OSD-Initiator Library
60=========================
61osd_initiator is a low level implementation of an osd initiator encoder.
62But even though, it should be intuitive and easy to use. Perhaps over time an
63higher lever will form that automates some of the more common recipes.
64
65init/fini:
66- osd_dev_init() associates a scsi_device with an osd_dev structure
67 and initializes some global pools. This should be done once per scsi_device
68 (OSD LUN). The osd_dev structure is needed for calling osd_start_request().
69
70- osd_dev_fini() cleans up before a osd_dev/scsi_device destruction.
71
72OSD commands encoding, execution, and decoding of results:
73
74struct osd_request's is used to iteratively encode an OSD command and carry
75its state throughout execution. Each request goes through these stages:
76
77a. osd_start_request() allocates the request.
78
79b. Any of the osd_req_* methods is used to encode a request of the specified
80 type.
81
82c. osd_req_add_{get,set}_attr_* may be called to add get/set attributes to the
83 CDB. "List" or "Page" mode can be used exclusively. The attribute-list API
84 can be called multiple times on the same request. However, only one
85 attribute-page can be read, as mandated by the OSD standard.
86
87d. osd_finalize_request() computes offsets into the data-in and data-out buffers
88 and signs the request using the provided capability key and integrity-
89 check parameters.
90
91e. osd_execute_request() may be called to execute the request via the block
92 layer and wait for its completion. The request can be executed
93 asynchronously by calling the block layer API directly.
94
95f. After execution, osd_req_decode_sense() can be called to decode the request's
96 sense information.
97
98g. osd_req_decode_get_attr() may be called to retrieve osd_add_get_attr_list()
99 values.
100
101h. osd_end_request() must be called to deallocate the request and any resource
102 associated with it. Note that osd_end_request cleans up the request at any
103 stage and it must always be called after a successful osd_start_request().
104
105osd_request's structure:
106
107The OSD standard defines a complex structure of IO segments pointed to by
108members in the CDB. Up to 3 segments can be deployed in the IN-Buffer and up to
1094 in the OUT-Buffer. The ASCII illustration below depicts a secure-read with
110associated get+set of attributes-lists. Other combinations very on the same
111basic theme. From no-segments-used up to all-segments-used.
112
113|________OSD-CDB__________|
114| |
115|read_len (offset=0) -|---------\
116| | |
117|get_attrs_list_length | |
118|get_attrs_list_offset -|----\ |
119| | | |
120|retrieved_attrs_alloc_len| | |
121|retrieved_attrs_offset -|----|----|-\
122| | | | |
123|set_attrs_list_length | | | |
124|set_attrs_list_offset -|-\ | | |
125| | | | | |
126|in_data_integ_offset -|-|--|----|-|-\
127|out_data_integ_offset -|-|--|--\ | | |
128\_________________________/ | | | | | |
129 | | | | | |
130|_______OUT-BUFFER________| | | | | | |
131| Set attr list |</ | | | | |
132| | | | | | |
133|-------------------------| | | | | |
134| Get attr descriptors |<---/ | | | |
135| | | | | |
136|-------------------------| | | | |
137| Out-data integrity |<------/ | | |
138| | | | |
139\_________________________/ | | |
140 | | |
141|________IN-BUFFER________| | | |
142| In-Data read |<--------/ | |
143| | | |
144|-------------------------| | |
145| Get attr list |<----------/ |
146| | |
147|-------------------------| |
148| In-data integrity |<------------/
149| |
150\_________________________/
151
152A block device request can carry bidirectional payload by means of associating
153a bidi_read request with a main write-request. Each in/out request is described
154by a chain of BIOs associated with each request.
155The CDB is of a SCSI VARLEN CDB format, as described by OSD standard.
156The OSD standard also mandates alignment restrictions at start of each segment.
157
158In the code, in struct osd_request, there are two _osd_io_info structures to
159describe the IN/OUT buffers above, two BIOs for the data payload and up to five
160_osd_req_data_segment structures to hold the different segments allocation and
161information.
162
163Important: We have chosen to disregard the assumption that a BIO-chain (and
164the resulting sg-list) describes a linear memory buffer. Meaning only first and
165last scatter chain can be incomplete and all the middle chains are of PAGE_SIZE.
166For us, a scatter-gather-list, as its name implies and as used by the Networking
167layer, is to describe a vector of buffers that will be transferred to/from the
168wire. It works very well with current iSCSI transport. iSCSI is currently the
169only deployed OSD transport. In the future we anticipate SAS and FC attached OSD
170devices as well.
171
172The OSD Testing ULD
173===================
174TODO: More user-mode control on tests.
175
176Authors, Mailing list
177=====================
178Please communicate with us on any deployment of osd, whether using this code
179or not.
180
181Any problems, questions, bug reports, lonely OSD nights, please email:
182 OSD Dev List <osd-dev@open-osd.org>
183
184More up-to-date information can be found on:
185http://open-osd.org
186
187Boaz Harrosh <bharrosh@panasas.com>
188Benny Halevy <bhalevy@panasas.com>
189
190References
191==========
192Weber, R., "SCSI Object-Based Storage Device Commands",
193T10/1355-D ANSI/INCITS 400-2004,
194http://www.t10.org/ftp/t10/drafts/osd/osd-r10.pdf
195
196Weber, R., "SCSI Object-Based Storage Device Commands -2 (OSD-2)"
197T10/1729-D, Working Draft, rev. 3
198http://www.t10.org/ftp/t10/drafts/osd2/osd2r03.pdf
diff --git a/MAINTAINERS b/MAINTAINERS
index fa7be04b0cf0..d8a4c8d0a554 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -765,6 +765,14 @@ L: linux-wireless@vger.kernel.org
765L: ath9k-devel@lists.ath9k.org 765L: ath9k-devel@lists.ath9k.org
766S: Supported 766S: Supported
767 767
768ATHEROS AR9170 WIRELESS DRIVER
769P: Christian Lamparter
770M: chunkeey@web.de
771L: linux-wireless@vger.kernel.org
772W: http://wireless.kernel.org/en/users/Drivers/ar9170
773S: Maintained
774F: drivers/net/wireless/ar9170/
775
768ATI_REMOTE2 DRIVER 776ATI_REMOTE2 DRIVER
769P: Ville Syrjala 777P: Ville Syrjala
770M: syrjala@sci.fi 778M: syrjala@sci.fi
@@ -3302,6 +3310,16 @@ L: orinoco-devel@lists.sourceforge.net
3302W: http://www.nongnu.org/orinoco/ 3310W: http://www.nongnu.org/orinoco/
3303S: Maintained 3311S: Maintained
3304 3312
3313OSD LIBRARY
3314P: Boaz Harrosh
3315M: bharrosh@panasas.com
3316P: Benny Halevy
3317M: bhalevy@panasas.com
3318L: osd-dev@open-osd.org
3319W: http://open-osd.org
3320T: git://git.open-osd.org/open-osd.git
3321S: Maintained
3322
3305P54 WIRELESS DRIVER 3323P54 WIRELESS DRIVER
3306P: Michael Wu 3324P: Michael Wu
3307M: flamingice@sourmilk.net 3325M: flamingice@sourmilk.net
@@ -3602,7 +3620,7 @@ S: Maintained
3602RALINK RT2X00 WIRELESS LAN DRIVER 3620RALINK RT2X00 WIRELESS LAN DRIVER
3603P: rt2x00 project 3621P: rt2x00 project
3604L: linux-wireless@vger.kernel.org 3622L: linux-wireless@vger.kernel.org
3605L: rt2400-devel@lists.sourceforge.net 3623L: users@rt2x00.serialmonkey.com
3606W: http://rt2x00.serialmonkey.com/ 3624W: http://rt2x00.serialmonkey.com/
3607S: Maintained 3625S: Maintained
3608T: git kernel.org:/pub/scm/linux/kernel/git/ivd/rt2x00.git 3626T: git kernel.org:/pub/scm/linux/kernel/git/ivd/rt2x00.git
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index e4a54b615894..b45d913a51c3 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -903,8 +903,9 @@ sys_alpha_pipe:
903 stq $26, 0($sp) 903 stq $26, 0($sp)
904 .prologue 0 904 .prologue 0
905 905
906 mov $31, $17
906 lda $16, 8($sp) 907 lda $16, 8($sp)
907 jsr $26, do_pipe 908 jsr $26, do_pipe_flags
908 909
909 ldq $26, 0($sp) 910 ldq $26, 0($sp)
910 bne $0, 1f 911 bne $0, 1f
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index ae41f097864b..42ee05981e71 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -46,8 +46,6 @@
46#include <asm/hwrpb.h> 46#include <asm/hwrpb.h>
47#include <asm/processor.h> 47#include <asm/processor.h>
48 48
49extern int do_pipe(int *);
50
51/* 49/*
52 * Brk needs to return an error. Still support Linux's brk(0) query idiom, 50 * Brk needs to return an error. Still support Linux's brk(0) query idiom,
53 * which OSF programs just shouldn't be doing. We're still not quite 51 * which OSF programs just shouldn't be doing. We're still not quite
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index a46f8395e9a5..af9405cd70e5 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -240,7 +240,7 @@ ia32_syscall_table:
240 data8 sys_ni_syscall 240 data8 sys_ni_syscall
241 data8 sys_umask /* 60 */ 241 data8 sys_umask /* 60 */
242 data8 sys_chroot 242 data8 sys_chroot
243 data8 sys_ustat 243 data8 compat_sys_ustat
244 data8 sys_dup2 244 data8 sys_dup2
245 data8 sys_getppid 245 data8 sys_getppid
246 data8 sys_getpgrp /* 65 */ 246 data8 sys_getpgrp /* 65 */
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 0e499757309b..5c0f408cfd71 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2196,7 +2196,7 @@ pfmfs_delete_dentry(struct dentry *dentry)
2196 return 1; 2196 return 1;
2197} 2197}
2198 2198
2199static struct dentry_operations pfmfs_dentry_operations = { 2199static const struct dentry_operations pfmfs_dentry_operations = {
2200 .d_delete = pfmfs_delete_dentry, 2200 .d_delete = pfmfs_delete_dentry,
2201}; 2201};
2202 2202
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 49aac6e17df9..2a472713de8e 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -355,40 +355,6 @@ SYSCALL_DEFINE1(32_personality, unsigned long, personality)
355 return ret; 355 return ret;
356} 356}
357 357
358/* ustat compatibility */
359struct ustat32 {
360 compat_daddr_t f_tfree;
361 compat_ino_t f_tinode;
362 char f_fname[6];
363 char f_fpack[6];
364};
365
366extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf);
367
368SYSCALL_DEFINE2(32_ustat, dev_t, dev, struct ustat32 __user *, ubuf32)
369{
370 int err;
371 struct ustat tmp;
372 struct ustat32 tmp32;
373 mm_segment_t old_fs = get_fs();
374
375 set_fs(KERNEL_DS);
376 err = sys_ustat(dev, (struct ustat __user *)&tmp);
377 set_fs(old_fs);
378
379 if (err)
380 goto out;
381
382 memset(&tmp32, 0, sizeof(struct ustat32));
383 tmp32.f_tfree = tmp.f_tfree;
384 tmp32.f_tinode = tmp.f_tinode;
385
386 err = copy_to_user(ubuf32, &tmp32, sizeof(struct ustat32)) ? -EFAULT : 0;
387
388out:
389 return err;
390}
391
392SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd, 358SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd,
393 compat_off_t __user *, offset, s32, count) 359 compat_off_t __user *, offset, s32, count)
394{ 360{
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 7438e92f8a01..f61d6b0e5731 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -253,7 +253,7 @@ EXPORT(sysn32_call_table)
253 PTR compat_sys_utime /* 6130 */ 253 PTR compat_sys_utime /* 6130 */
254 PTR sys_mknod 254 PTR sys_mknod
255 PTR sys_32_personality 255 PTR sys_32_personality
256 PTR sys_32_ustat 256 PTR compat_sys_ustat
257 PTR compat_sys_statfs 257 PTR compat_sys_statfs
258 PTR compat_sys_fstatfs /* 6135 */ 258 PTR compat_sys_fstatfs /* 6135 */
259 PTR sys_sysfs 259 PTR sys_sysfs
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b0fef4ff9827..60997f1f69d4 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -265,7 +265,7 @@ sys_call_table:
265 PTR sys_olduname 265 PTR sys_olduname
266 PTR sys_umask /* 4060 */ 266 PTR sys_umask /* 4060 */
267 PTR sys_chroot 267 PTR sys_chroot
268 PTR sys_32_ustat 268 PTR compat_sys_ustat
269 PTR sys_dup2 269 PTR sys_dup2
270 PTR sys_getppid 270 PTR sys_getppid
271 PTR sys_getpgrp /* 4065 */ 271 PTR sys_getpgrp /* 4065 */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 303d2b647e41..03b9a01bc16c 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -130,7 +130,7 @@
130 ENTRY_OURS(newuname) 130 ENTRY_OURS(newuname)
131 ENTRY_SAME(umask) /* 60 */ 131 ENTRY_SAME(umask) /* 60 */
132 ENTRY_SAME(chroot) 132 ENTRY_SAME(chroot)
133 ENTRY_SAME(ustat) 133 ENTRY_COMP(ustat)
134 ENTRY_SAME(dup2) 134 ENTRY_SAME(dup2)
135 ENTRY_SAME(getppid) 135 ENTRY_SAME(getppid)
136 ENTRY_SAME(getpgrp) /* 65 */ 136 ENTRY_SAME(getpgrp) /* 65 */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 72353f6070a4..fe166491e9dc 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -65,7 +65,7 @@ SYSCALL(ni_syscall)
65SYSX(sys_ni_syscall,sys_olduname, sys_olduname) 65SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
66COMPAT_SYS_SPU(umask) 66COMPAT_SYS_SPU(umask)
67SYSCALL_SPU(chroot) 67SYSCALL_SPU(chroot)
68SYSCALL(ustat) 68COMPAT_SYS(ustat)
69SYSCALL_SPU(dup2) 69SYSCALL_SPU(dup2)
70SYSCALL_SPU(getppid) 70SYSCALL_SPU(getppid)
71SYSCALL_SPU(getpgrp) 71SYSCALL_SPU(getpgrp)
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 62c706eb0de6..87cf5a79a351 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -252,7 +252,7 @@ sys32_chroot_wrapper:
252sys32_ustat_wrapper: 252sys32_ustat_wrapper:
253 llgfr %r2,%r2 # dev_t 253 llgfr %r2,%r2 # dev_t
254 llgtr %r3,%r3 # struct ustat * 254 llgtr %r3,%r3 # struct ustat *
255 jg sys_ustat 255 jg compat_sys_ustat
256 256
257 .globl sys32_dup2_wrapper 257 .globl sys32_dup2_wrapper
258sys32_dup2_wrapper: 258sys32_dup2_wrapper:
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 6cd1a5b65067..79457f682b5a 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1031,7 +1031,7 @@ void smp_fetch_global_regs(void)
1031 * If the address space is non-shared (ie. mm->count == 1) we avoid 1031 * If the address space is non-shared (ie. mm->count == 1) we avoid
1032 * cross calls when we want to flush the currently running process's 1032 * cross calls when we want to flush the currently running process's
1033 * tlb state. This is done by clearing all cpu bits except the current 1033 * tlb state. This is done by clearing all cpu bits except the current
1034 * processor's in current->active_mm->cpu_vm_mask and performing the 1034 * processor's in current->mm->cpu_vm_mask and performing the
1035 * flush locally only. This will force any subsequent cpus which run 1035 * flush locally only. This will force any subsequent cpus which run
1036 * this task to flush the context from the local tlb if the process 1036 * this task to flush the context from the local tlb if the process
1037 * migrates to another cpu (again). 1037 * migrates to another cpu (again).
@@ -1074,7 +1074,7 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
1074 u32 ctx = CTX_HWBITS(mm->context); 1074 u32 ctx = CTX_HWBITS(mm->context);
1075 int cpu = get_cpu(); 1075 int cpu = get_cpu();
1076 1076
1077 if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) 1077 if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
1078 mm->cpu_vm_mask = cpumask_of_cpu(cpu); 1078 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
1079 else 1079 else
1080 smp_cross_call_masked(&xcall_flush_tlb_pending, 1080 smp_cross_call_masked(&xcall_flush_tlb_pending,
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index f93c42a2b522..a8000b1cda74 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -51,7 +51,7 @@ sys_call_table32:
51/*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64 51/*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
52 .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount 52 .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
53/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall 53/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall
54 .word sys_quotactl, sys_set_tid_address, compat_sys_mount, sys_ustat, sys32_setxattr 54 .word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys32_setxattr
55/*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents 55/*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
56 .word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr 56 .word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr
57/*180*/ .word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall 57/*180*/ .word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 642562d83ec4..4ee2e48c4b39 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -724,12 +724,14 @@ void timer_interrupt(int irq, struct pt_regs *regs)
724 unsigned long tick_mask = tick_ops->softint_mask; 724 unsigned long tick_mask = tick_ops->softint_mask;
725 int cpu = smp_processor_id(); 725 int cpu = smp_processor_id();
726 struct clock_event_device *evt = &per_cpu(sparc64_events, cpu); 726 struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
727 struct irq_desc *desc;
727 728
728 clear_softint(tick_mask); 729 clear_softint(tick_mask);
729 730
730 irq_enter(); 731 irq_enter();
731 732
732 kstat_this_cpu.irqs[0]++; 733 desc = irq_to_desc(0);
734 kstat_incr_irqs_this_cpu(0, desc);
733 735
734 if (unlikely(!evt->event_handler)) { 736 if (unlikely(!evt->event_handler)) {
735 printk(KERN_WARNING 737 printk(KERN_WARNING
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index fde510b664d3..434224e2229f 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -86,7 +86,7 @@ static int uml_net_rx(struct net_device *dev)
86 drop_skb->dev = dev; 86 drop_skb->dev = dev;
87 /* Read a packet into drop_skb and don't do anything with it. */ 87 /* Read a packet into drop_skb and don't do anything with it. */
88 (*lp->read)(lp->fd, drop_skb, lp); 88 (*lp->read)(lp->fd, drop_skb, lp);
89 lp->stats.rx_dropped++; 89 dev->stats.rx_dropped++;
90 return 0; 90 return 0;
91 } 91 }
92 92
@@ -99,8 +99,8 @@ static int uml_net_rx(struct net_device *dev)
99 skb_trim(skb, pkt_len); 99 skb_trim(skb, pkt_len);
100 skb->protocol = (*lp->protocol)(skb); 100 skb->protocol = (*lp->protocol)(skb);
101 101
102 lp->stats.rx_bytes += skb->len; 102 dev->stats.rx_bytes += skb->len;
103 lp->stats.rx_packets++; 103 dev->stats.rx_packets++;
104 netif_rx(skb); 104 netif_rx(skb);
105 return pkt_len; 105 return pkt_len;
106 } 106 }
@@ -224,8 +224,8 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
224 len = (*lp->write)(lp->fd, skb, lp); 224 len = (*lp->write)(lp->fd, skb, lp);
225 225
226 if (len == skb->len) { 226 if (len == skb->len) {
227 lp->stats.tx_packets++; 227 dev->stats.tx_packets++;
228 lp->stats.tx_bytes += skb->len; 228 dev->stats.tx_bytes += skb->len;
229 dev->trans_start = jiffies; 229 dev->trans_start = jiffies;
230 netif_start_queue(dev); 230 netif_start_queue(dev);
231 231
@@ -234,7 +234,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
234 } 234 }
235 else if (len == 0) { 235 else if (len == 0) {
236 netif_start_queue(dev); 236 netif_start_queue(dev);
237 lp->stats.tx_dropped++; 237 dev->stats.tx_dropped++;
238 } 238 }
239 else { 239 else {
240 netif_start_queue(dev); 240 netif_start_queue(dev);
@@ -248,12 +248,6 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
248 return 0; 248 return 0;
249} 249}
250 250
251static struct net_device_stats *uml_net_get_stats(struct net_device *dev)
252{
253 struct uml_net_private *lp = netdev_priv(dev);
254 return &lp->stats;
255}
256
257static void uml_net_set_multicast_list(struct net_device *dev) 251static void uml_net_set_multicast_list(struct net_device *dev)
258{ 252{
259 return; 253 return;
@@ -377,6 +371,18 @@ static void net_device_release(struct device *dev)
377 free_netdev(netdev); 371 free_netdev(netdev);
378} 372}
379 373
374static const struct net_device_ops uml_netdev_ops = {
375 .ndo_open = uml_net_open,
376 .ndo_stop = uml_net_close,
377 .ndo_start_xmit = uml_net_start_xmit,
378 .ndo_set_multicast_list = uml_net_set_multicast_list,
379 .ndo_tx_timeout = uml_net_tx_timeout,
380 .ndo_set_mac_address = uml_net_set_mac,
381 .ndo_change_mtu = uml_net_change_mtu,
382 .ndo_set_mac_address = eth_mac_addr,
383 .ndo_validate_addr = eth_validate_addr,
384};
385
380/* 386/*
381 * Ensures that platform_driver_register is called only once by 387 * Ensures that platform_driver_register is called only once by
382 * eth_configure. Will be set in an initcall. 388 * eth_configure. Will be set in an initcall.
@@ -473,14 +479,7 @@ static void eth_configure(int n, void *init, char *mac,
473 479
474 set_ether_mac(dev, device->mac); 480 set_ether_mac(dev, device->mac);
475 dev->mtu = transport->user->mtu; 481 dev->mtu = transport->user->mtu;
476 dev->open = uml_net_open; 482 dev->netdev_ops = &uml_netdev_ops;
477 dev->hard_start_xmit = uml_net_start_xmit;
478 dev->stop = uml_net_close;
479 dev->get_stats = uml_net_get_stats;
480 dev->set_multicast_list = uml_net_set_multicast_list;
481 dev->tx_timeout = uml_net_tx_timeout;
482 dev->set_mac_address = uml_net_set_mac;
483 dev->change_mtu = uml_net_change_mtu;
484 dev->ethtool_ops = &uml_net_ethtool_ops; 483 dev->ethtool_ops = &uml_net_ethtool_ops;
485 dev->watchdog_timeo = (HZ >> 1); 484 dev->watchdog_timeo = (HZ >> 1);
486 dev->irq = UM_ETH_IRQ; 485 dev->irq = UM_ETH_IRQ;
diff --git a/arch/um/include/shared/net_kern.h b/arch/um/include/shared/net_kern.h
index d843c7924a7c..5c367f22595b 100644
--- a/arch/um/include/shared/net_kern.h
+++ b/arch/um/include/shared/net_kern.h
@@ -26,7 +26,7 @@ struct uml_net_private {
26 spinlock_t lock; 26 spinlock_t lock;
27 struct net_device *dev; 27 struct net_device *dev;
28 struct timer_list tl; 28 struct timer_list tl;
29 struct net_device_stats stats; 29
30 struct work_struct work; 30 struct work_struct work;
31 int fd; 31 int fd;
32 unsigned char mac[ETH_ALEN]; 32 unsigned char mac[ETH_ALEN];
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 5a0d76dc56a4..8ef8876666b2 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -557,7 +557,7 @@ ia32_sys_call_table:
557 .quad sys32_olduname 557 .quad sys32_olduname
558 .quad sys_umask /* 60 */ 558 .quad sys_umask /* 60 */
559 .quad sys_chroot 559 .quad sys_chroot
560 .quad sys32_ustat 560 .quad compat_sys_ustat
561 .quad sys_dup2 561 .quad sys_dup2
562 .quad sys_getppid 562 .quad sys_getppid
563 .quad sys_getpgrp /* 65 */ 563 .quad sys_getpgrp /* 65 */
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 6c0d7f6231af..efac92fd1efb 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -638,28 +638,6 @@ long sys32_uname(struct old_utsname __user *name)
638 return err ? -EFAULT : 0; 638 return err ? -EFAULT : 0;
639} 639}
640 640
641long sys32_ustat(unsigned dev, struct ustat32 __user *u32p)
642{
643 struct ustat u;
644 mm_segment_t seg;
645 int ret;
646
647 seg = get_fs();
648 set_fs(KERNEL_DS);
649 ret = sys_ustat(dev, (struct ustat __user *)&u);
650 set_fs(seg);
651 if (ret < 0)
652 return ret;
653
654 if (!access_ok(VERIFY_WRITE, u32p, sizeof(struct ustat32)) ||
655 __put_user((__u32) u.f_tfree, &u32p->f_tfree) ||
656 __put_user((__u32) u.f_tinode, &u32p->f_tfree) ||
657 __copy_to_user(&u32p->f_fname, u.f_fname, sizeof(u.f_fname)) ||
658 __copy_to_user(&u32p->f_fpack, u.f_fpack, sizeof(u.f_fpack)))
659 ret = -EFAULT;
660 return ret;
661}
662
663asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv, 641asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv,
664 compat_uptr_t __user *envp, struct pt_regs *regs) 642 compat_uptr_t __user *envp, struct pt_regs *regs)
665{ 643{
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index 50ca486fd88c..1f7e62517284 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -129,13 +129,6 @@ typedef struct compat_siginfo {
129 } _sifields; 129 } _sifields;
130} compat_siginfo_t; 130} compat_siginfo_t;
131 131
132struct ustat32 {
133 __u32 f_tfree;
134 compat_ino_t f_tinode;
135 char f_fname[6];
136 char f_fpack[6];
137};
138
139#define IA32_STACK_TOP IA32_PAGE_OFFSET 132#define IA32_STACK_TOP IA32_PAGE_OFFSET
140 133
141#ifdef __KERNEL__ 134#ifdef __KERNEL__
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index ffb08be2a530..72a6dcd1299b 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -70,8 +70,6 @@ struct old_utsname;
70asmlinkage long sys32_olduname(struct oldold_utsname __user *); 70asmlinkage long sys32_olduname(struct oldold_utsname __user *);
71long sys32_uname(struct old_utsname __user *); 71long sys32_uname(struct old_utsname __user *);
72 72
73long sys32_ustat(unsigned, struct ustat32 __user *);
74
75asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *, 73asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *,
76 compat_uptr_t __user *, struct pt_regs *); 74 compat_uptr_t __user *, struct pt_regs *);
77asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *); 75asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *);
diff --git a/block/cmd-filter.c b/block/cmd-filter.c
index 504b275e1b90..572bbc2f900d 100644
--- a/block/cmd-filter.c
+++ b/block/cmd-filter.c
@@ -22,6 +22,7 @@
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23#include <linux/capability.h> 23#include <linux/capability.h>
24#include <linux/bitops.h> 24#include <linux/bitops.h>
25#include <linux/blkdev.h>
25 26
26#include <scsi/scsi.h> 27#include <scsi/scsi.h>
27#include <linux/cdrom.h> 28#include <linux/cdrom.h>
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 4373adb2119a..9d9490e22e07 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -26,6 +26,10 @@
26#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 26#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
27#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC 27#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
28#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE 28#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
29#define PCI_DEVICE_ID_INTEL_IGDGM_HB 0xA010
30#define PCI_DEVICE_ID_INTEL_IGDGM_IG 0xA011
31#define PCI_DEVICE_ID_INTEL_IGDG_HB 0xA000
32#define PCI_DEVICE_ID_INTEL_IGDG_IG 0xA001
29#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 33#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
30#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 34#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
31#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 35#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
@@ -60,7 +64,12 @@
60 64
61#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ 65#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
62 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ 66 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
63 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB) 67 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
68 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
69 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
70
71#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
72 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
64 73
65#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \ 74#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
66 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ 75 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
@@ -510,7 +519,7 @@ static void intel_i830_init_gtt_entries(void)
510 size = 512; 519 size = 512;
511 } 520 }
512 size += 4; /* add in BIOS popup space */ 521 size += 4; /* add in BIOS popup space */
513 } else if (IS_G33) { 522 } else if (IS_G33 && !IS_IGD) {
514 /* G33's GTT size defined in gmch_ctrl */ 523 /* G33's GTT size defined in gmch_ctrl */
515 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { 524 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
516 case G33_PGETBL_SIZE_1M: 525 case G33_PGETBL_SIZE_1M:
@@ -526,7 +535,7 @@ static void intel_i830_init_gtt_entries(void)
526 size = 512; 535 size = 512;
527 } 536 }
528 size += 4; 537 size += 4;
529 } else if (IS_G4X) { 538 } else if (IS_G4X || IS_IGD) {
530 /* On 4 series hardware, GTT stolen is separate from graphics 539 /* On 4 series hardware, GTT stolen is separate from graphics
531 * stolen, ignore it in stolen gtt entries counting. However, 540 * stolen, ignore it in stolen gtt entries counting. However,
532 * 4KB of the stolen memory doesn't get mapped to the GTT. 541 * 4KB of the stolen memory doesn't get mapped to the GTT.
@@ -2161,6 +2170,10 @@ static const struct intel_driver_description {
2161 NULL, &intel_g33_driver }, 2170 NULL, &intel_g33_driver },
2162 { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", 2171 { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
2163 NULL, &intel_g33_driver }, 2172 NULL, &intel_g33_driver },
2173 { PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD",
2174 NULL, &intel_g33_driver },
2175 { PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD",
2176 NULL, &intel_g33_driver },
2164 { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, 2177 { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
2165 "Mobile Intel® GM45 Express", NULL, &intel_i965_driver }, 2178 "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
2166 { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0, 2179 { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
@@ -2355,6 +2368,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
2355 ID(PCI_DEVICE_ID_INTEL_82945G_HB), 2368 ID(PCI_DEVICE_ID_INTEL_82945G_HB),
2356 ID(PCI_DEVICE_ID_INTEL_82945GM_HB), 2369 ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
2357 ID(PCI_DEVICE_ID_INTEL_82945GME_HB), 2370 ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
2371 ID(PCI_DEVICE_ID_INTEL_IGDGM_HB),
2372 ID(PCI_DEVICE_ID_INTEL_IGDG_HB),
2358 ID(PCI_DEVICE_ID_INTEL_82946GZ_HB), 2373 ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
2359 ID(PCI_DEVICE_ID_INTEL_82G35_HB), 2374 ID(PCI_DEVICE_ID_INTEL_82G35_HB),
2360 ID(PCI_DEVICE_ID_INTEL_82965Q_HB), 2375 ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index a5dd7a665aa8..8b8c8c22f0fc 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -63,8 +63,7 @@ static int descriptor_count;
63#define BIB_CMC ((1) << 30) 63#define BIB_CMC ((1) << 30)
64#define BIB_IMC ((1) << 31) 64#define BIB_IMC ((1) << 31)
65 65
66static u32 * 66static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
67generate_config_rom(struct fw_card *card, size_t *config_rom_length)
68{ 67{
69 struct fw_descriptor *desc; 68 struct fw_descriptor *desc;
70 static u32 config_rom[256]; 69 static u32 config_rom[256];
@@ -128,8 +127,7 @@ generate_config_rom(struct fw_card *card, size_t *config_rom_length)
128 return config_rom; 127 return config_rom;
129} 128}
130 129
131static void 130static void update_config_roms(void)
132update_config_roms(void)
133{ 131{
134 struct fw_card *card; 132 struct fw_card *card;
135 u32 *config_rom; 133 u32 *config_rom;
@@ -141,8 +139,7 @@ update_config_roms(void)
141 } 139 }
142} 140}
143 141
144int 142int fw_core_add_descriptor(struct fw_descriptor *desc)
145fw_core_add_descriptor(struct fw_descriptor *desc)
146{ 143{
147 size_t i; 144 size_t i;
148 145
@@ -171,8 +168,7 @@ fw_core_add_descriptor(struct fw_descriptor *desc)
171 return 0; 168 return 0;
172} 169}
173 170
174void 171void fw_core_remove_descriptor(struct fw_descriptor *desc)
175fw_core_remove_descriptor(struct fw_descriptor *desc)
176{ 172{
177 mutex_lock(&card_mutex); 173 mutex_lock(&card_mutex);
178 174
@@ -185,12 +181,30 @@ fw_core_remove_descriptor(struct fw_descriptor *desc)
185 mutex_unlock(&card_mutex); 181 mutex_unlock(&card_mutex);
186} 182}
187 183
184static int set_broadcast_channel(struct device *dev, void *data)
185{
186 fw_device_set_broadcast_channel(fw_device(dev), (long)data);
187 return 0;
188}
189
190static void allocate_broadcast_channel(struct fw_card *card, int generation)
191{
192 int channel, bandwidth = 0;
193
194 fw_iso_resource_manage(card, generation, 1ULL << 31,
195 &channel, &bandwidth, true);
196 if (channel == 31) {
197 card->broadcast_channel_allocated = true;
198 device_for_each_child(card->device, (void *)(long)generation,
199 set_broadcast_channel);
200 }
201}
202
188static const char gap_count_table[] = { 203static const char gap_count_table[] = {
189 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40 204 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
190}; 205};
191 206
192void 207void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
193fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
194{ 208{
195 int scheduled; 209 int scheduled;
196 210
@@ -200,37 +214,38 @@ fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
200 fw_card_put(card); 214 fw_card_put(card);
201} 215}
202 216
203static void 217static void fw_card_bm_work(struct work_struct *work)
204fw_card_bm_work(struct work_struct *work)
205{ 218{
206 struct fw_card *card = container_of(work, struct fw_card, work.work); 219 struct fw_card *card = container_of(work, struct fw_card, work.work);
207 struct fw_device *root_device; 220 struct fw_device *root_device;
208 struct fw_node *root_node, *local_node; 221 struct fw_node *root_node;
209 unsigned long flags; 222 unsigned long flags;
210 int root_id, new_root_id, irm_id, gap_count, generation, grace, rcode; 223 int root_id, new_root_id, irm_id, local_id;
224 int gap_count, generation, grace, rcode;
211 bool do_reset = false; 225 bool do_reset = false;
212 bool root_device_is_running; 226 bool root_device_is_running;
213 bool root_device_is_cmc; 227 bool root_device_is_cmc;
214 __be32 lock_data[2]; 228 __be32 lock_data[2];
215 229
216 spin_lock_irqsave(&card->lock, flags); 230 spin_lock_irqsave(&card->lock, flags);
217 local_node = card->local_node;
218 root_node = card->root_node;
219 231
220 if (local_node == NULL) { 232 if (card->local_node == NULL) {
221 spin_unlock_irqrestore(&card->lock, flags); 233 spin_unlock_irqrestore(&card->lock, flags);
222 goto out_put_card; 234 goto out_put_card;
223 } 235 }
224 fw_node_get(local_node);
225 fw_node_get(root_node);
226 236
227 generation = card->generation; 237 generation = card->generation;
238 root_node = card->root_node;
239 fw_node_get(root_node);
228 root_device = root_node->data; 240 root_device = root_node->data;
229 root_device_is_running = root_device && 241 root_device_is_running = root_device &&
230 atomic_read(&root_device->state) == FW_DEVICE_RUNNING; 242 atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
231 root_device_is_cmc = root_device && root_device->cmc; 243 root_device_is_cmc = root_device && root_device->cmc;
232 root_id = root_node->node_id; 244 root_id = root_node->node_id;
233 grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); 245 irm_id = card->irm_node->node_id;
246 local_id = card->local_node->node_id;
247
248 grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
234 249
235 if (is_next_generation(generation, card->bm_generation) || 250 if (is_next_generation(generation, card->bm_generation) ||
236 (card->bm_generation != generation && grace)) { 251 (card->bm_generation != generation && grace)) {
@@ -246,16 +261,15 @@ fw_card_bm_work(struct work_struct *work)
246 * next generation. 261 * next generation.
247 */ 262 */
248 263
249 irm_id = card->irm_node->node_id;
250 if (!card->irm_node->link_on) { 264 if (!card->irm_node->link_on) {
251 new_root_id = local_node->node_id; 265 new_root_id = local_id;
252 fw_notify("IRM has link off, making local node (%02x) root.\n", 266 fw_notify("IRM has link off, making local node (%02x) root.\n",
253 new_root_id); 267 new_root_id);
254 goto pick_me; 268 goto pick_me;
255 } 269 }
256 270
257 lock_data[0] = cpu_to_be32(0x3f); 271 lock_data[0] = cpu_to_be32(0x3f);
258 lock_data[1] = cpu_to_be32(local_node->node_id); 272 lock_data[1] = cpu_to_be32(local_id);
259 273
260 spin_unlock_irqrestore(&card->lock, flags); 274 spin_unlock_irqrestore(&card->lock, flags);
261 275
@@ -269,9 +283,14 @@ fw_card_bm_work(struct work_struct *work)
269 goto out; 283 goto out;
270 284
271 if (rcode == RCODE_COMPLETE && 285 if (rcode == RCODE_COMPLETE &&
272 lock_data[0] != cpu_to_be32(0x3f)) 286 lock_data[0] != cpu_to_be32(0x3f)) {
273 /* Somebody else is BM, let them do the work. */ 287
288 /* Somebody else is BM. Only act as IRM. */
289 if (local_id == irm_id)
290 allocate_broadcast_channel(card, generation);
291
274 goto out; 292 goto out;
293 }
275 294
276 spin_lock_irqsave(&card->lock, flags); 295 spin_lock_irqsave(&card->lock, flags);
277 296
@@ -282,19 +301,18 @@ fw_card_bm_work(struct work_struct *work)
282 * do a bus reset and pick the local node as 301 * do a bus reset and pick the local node as
283 * root, and thus, IRM. 302 * root, and thus, IRM.
284 */ 303 */
285 new_root_id = local_node->node_id; 304 new_root_id = local_id;
286 fw_notify("BM lock failed, making local node (%02x) root.\n", 305 fw_notify("BM lock failed, making local node (%02x) root.\n",
287 new_root_id); 306 new_root_id);
288 goto pick_me; 307 goto pick_me;
289 } 308 }
290 } else if (card->bm_generation != generation) { 309 } else if (card->bm_generation != generation) {
291 /* 310 /*
292 * OK, we weren't BM in the last generation, and it's 311 * We weren't BM in the last generation, and the last
293 * less than 100ms since last bus reset. Reschedule 312 * bus reset is less than 125ms ago. Reschedule this job.
294 * this task 100ms from now.
295 */ 313 */
296 spin_unlock_irqrestore(&card->lock, flags); 314 spin_unlock_irqrestore(&card->lock, flags);
297 fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 10)); 315 fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
298 goto out; 316 goto out;
299 } 317 }
300 318
@@ -310,7 +328,7 @@ fw_card_bm_work(struct work_struct *work)
310 * Either link_on is false, or we failed to read the 328 * Either link_on is false, or we failed to read the
311 * config rom. In either case, pick another root. 329 * config rom. In either case, pick another root.
312 */ 330 */
313 new_root_id = local_node->node_id; 331 new_root_id = local_id;
314 } else if (!root_device_is_running) { 332 } else if (!root_device_is_running) {
315 /* 333 /*
316 * If we haven't probed this device yet, bail out now 334 * If we haven't probed this device yet, bail out now
@@ -332,7 +350,7 @@ fw_card_bm_work(struct work_struct *work)
332 * successfully read the config rom, but it's not 350 * successfully read the config rom, but it's not
333 * cycle master capable. 351 * cycle master capable.
334 */ 352 */
335 new_root_id = local_node->node_id; 353 new_root_id = local_id;
336 } 354 }
337 355
338 pick_me: 356 pick_me:
@@ -363,25 +381,28 @@ fw_card_bm_work(struct work_struct *work)
363 card->index, new_root_id, gap_count); 381 card->index, new_root_id, gap_count);
364 fw_send_phy_config(card, new_root_id, generation, gap_count); 382 fw_send_phy_config(card, new_root_id, generation, gap_count);
365 fw_core_initiate_bus_reset(card, 1); 383 fw_core_initiate_bus_reset(card, 1);
384 /* Will allocate broadcast channel after the reset. */
385 } else {
386 if (local_id == irm_id)
387 allocate_broadcast_channel(card, generation);
366 } 388 }
389
367 out: 390 out:
368 fw_node_put(root_node); 391 fw_node_put(root_node);
369 fw_node_put(local_node);
370 out_put_card: 392 out_put_card:
371 fw_card_put(card); 393 fw_card_put(card);
372} 394}
373 395
374static void 396static void flush_timer_callback(unsigned long data)
375flush_timer_callback(unsigned long data)
376{ 397{
377 struct fw_card *card = (struct fw_card *)data; 398 struct fw_card *card = (struct fw_card *)data;
378 399
379 fw_flush_transactions(card); 400 fw_flush_transactions(card);
380} 401}
381 402
382void 403void fw_card_initialize(struct fw_card *card,
383fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, 404 const struct fw_card_driver *driver,
384 struct device *device) 405 struct device *device)
385{ 406{
386 static atomic_t index = ATOMIC_INIT(-1); 407 static atomic_t index = ATOMIC_INIT(-1);
387 408
@@ -406,13 +427,12 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
406} 427}
407EXPORT_SYMBOL(fw_card_initialize); 428EXPORT_SYMBOL(fw_card_initialize);
408 429
409int 430int fw_card_add(struct fw_card *card,
410fw_card_add(struct fw_card *card, 431 u32 max_receive, u32 link_speed, u64 guid)
411 u32 max_receive, u32 link_speed, u64 guid)
412{ 432{
413 u32 *config_rom; 433 u32 *config_rom;
414 size_t length; 434 size_t length;
415 int err; 435 int ret;
416 436
417 card->max_receive = max_receive; 437 card->max_receive = max_receive;
418 card->link_speed = link_speed; 438 card->link_speed = link_speed;
@@ -423,13 +443,14 @@ fw_card_add(struct fw_card *card,
423 list_add_tail(&card->link, &card_list); 443 list_add_tail(&card->link, &card_list);
424 mutex_unlock(&card_mutex); 444 mutex_unlock(&card_mutex);
425 445
426 err = card->driver->enable(card, config_rom, length); 446 ret = card->driver->enable(card, config_rom, length);
427 if (err < 0) { 447 if (ret < 0) {
428 mutex_lock(&card_mutex); 448 mutex_lock(&card_mutex);
429 list_del(&card->link); 449 list_del(&card->link);
430 mutex_unlock(&card_mutex); 450 mutex_unlock(&card_mutex);
431 } 451 }
432 return err; 452
453 return ret;
433} 454}
434EXPORT_SYMBOL(fw_card_add); 455EXPORT_SYMBOL(fw_card_add);
435 456
@@ -442,23 +463,20 @@ EXPORT_SYMBOL(fw_card_add);
442 * dummy driver just fails all IO. 463 * dummy driver just fails all IO.
443 */ 464 */
444 465
445static int 466static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
446dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
447{ 467{
448 BUG(); 468 BUG();
449 return -1; 469 return -1;
450} 470}
451 471
452static int 472static int dummy_update_phy_reg(struct fw_card *card, int address,
453dummy_update_phy_reg(struct fw_card *card, int address, 473 int clear_bits, int set_bits)
454 int clear_bits, int set_bits)
455{ 474{
456 return -ENODEV; 475 return -ENODEV;
457} 476}
458 477
459static int 478static int dummy_set_config_rom(struct fw_card *card,
460dummy_set_config_rom(struct fw_card *card, 479 u32 *config_rom, size_t length)
461 u32 *config_rom, size_t length)
462{ 480{
463 /* 481 /*
464 * We take the card out of card_list before setting the dummy 482 * We take the card out of card_list before setting the dummy
@@ -468,27 +486,23 @@ dummy_set_config_rom(struct fw_card *card,
468 return -1; 486 return -1;
469} 487}
470 488
471static void 489static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
472dummy_send_request(struct fw_card *card, struct fw_packet *packet)
473{ 490{
474 packet->callback(packet, card, -ENODEV); 491 packet->callback(packet, card, -ENODEV);
475} 492}
476 493
477static void 494static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
478dummy_send_response(struct fw_card *card, struct fw_packet *packet)
479{ 495{
480 packet->callback(packet, card, -ENODEV); 496 packet->callback(packet, card, -ENODEV);
481} 497}
482 498
483static int 499static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
484dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
485{ 500{
486 return -ENOENT; 501 return -ENOENT;
487} 502}
488 503
489static int 504static int dummy_enable_phys_dma(struct fw_card *card,
490dummy_enable_phys_dma(struct fw_card *card, 505 int node_id, int generation)
491 int node_id, int generation)
492{ 506{
493 return -ENODEV; 507 return -ENODEV;
494} 508}
@@ -503,16 +517,14 @@ static struct fw_card_driver dummy_driver = {
503 .enable_phys_dma = dummy_enable_phys_dma, 517 .enable_phys_dma = dummy_enable_phys_dma,
504}; 518};
505 519
506void 520void fw_card_release(struct kref *kref)
507fw_card_release(struct kref *kref)
508{ 521{
509 struct fw_card *card = container_of(kref, struct fw_card, kref); 522 struct fw_card *card = container_of(kref, struct fw_card, kref);
510 523
511 complete(&card->done); 524 complete(&card->done);
512} 525}
513 526
514void 527void fw_core_remove_card(struct fw_card *card)
515fw_core_remove_card(struct fw_card *card)
516{ 528{
517 card->driver->update_phy_reg(card, 4, 529 card->driver->update_phy_reg(card, 4,
518 PHY_LINK_ACTIVE | PHY_CONTENDER, 0); 530 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
@@ -536,8 +548,7 @@ fw_core_remove_card(struct fw_card *card)
536} 548}
537EXPORT_SYMBOL(fw_core_remove_card); 549EXPORT_SYMBOL(fw_core_remove_card);
538 550
539int 551int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
540fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
541{ 552{
542 int reg = short_reset ? 5 : 1; 553 int reg = short_reset ? 5 : 1;
543 int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET; 554 int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index ed03234cbea8..7eb6594cc3e5 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -18,87 +18,162 @@
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */ 19 */
20 20
21#include <linux/module.h> 21#include <linux/compat.h>
22#include <linux/kernel.h> 22#include <linux/delay.h>
23#include <linux/wait.h>
24#include <linux/errno.h>
25#include <linux/device.h> 23#include <linux/device.h>
26#include <linux/vmalloc.h> 24#include <linux/errno.h>
25#include <linux/firewire-cdev.h>
26#include <linux/idr.h>
27#include <linux/jiffies.h>
28#include <linux/kernel.h>
29#include <linux/kref.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/mutex.h>
27#include <linux/poll.h> 33#include <linux/poll.h>
28#include <linux/preempt.h> 34#include <linux/preempt.h>
35#include <linux/spinlock.h>
29#include <linux/time.h> 36#include <linux/time.h>
30#include <linux/delay.h> 37#include <linux/vmalloc.h>
31#include <linux/mm.h> 38#include <linux/wait.h>
32#include <linux/idr.h> 39#include <linux/workqueue.h>
33#include <linux/compat.h> 40
34#include <linux/firewire-cdev.h>
35#include <asm/system.h> 41#include <asm/system.h>
36#include <asm/uaccess.h> 42#include <asm/uaccess.h>
37#include "fw-transaction.h" 43
38#include "fw-topology.h"
39#include "fw-device.h" 44#include "fw-device.h"
45#include "fw-topology.h"
46#include "fw-transaction.h"
47
48struct client {
49 u32 version;
50 struct fw_device *device;
51
52 spinlock_t lock;
53 bool in_shutdown;
54 struct idr resource_idr;
55 struct list_head event_list;
56 wait_queue_head_t wait;
57 u64 bus_reset_closure;
58
59 struct fw_iso_context *iso_context;
60 u64 iso_closure;
61 struct fw_iso_buffer buffer;
62 unsigned long vm_start;
40 63
41struct client;
42struct client_resource {
43 struct list_head link; 64 struct list_head link;
44 void (*release)(struct client *client, struct client_resource *r); 65 struct kref kref;
45 u32 handle;
46}; 66};
47 67
68static inline void client_get(struct client *client)
69{
70 kref_get(&client->kref);
71}
72
73static void client_release(struct kref *kref)
74{
75 struct client *client = container_of(kref, struct client, kref);
76
77 fw_device_put(client->device);
78 kfree(client);
79}
80
81static void client_put(struct client *client)
82{
83 kref_put(&client->kref, client_release);
84}
85
86struct client_resource;
87typedef void (*client_resource_release_fn_t)(struct client *,
88 struct client_resource *);
89struct client_resource {
90 client_resource_release_fn_t release;
91 int handle;
92};
93
94struct address_handler_resource {
95 struct client_resource resource;
96 struct fw_address_handler handler;
97 __u64 closure;
98 struct client *client;
99};
100
101struct outbound_transaction_resource {
102 struct client_resource resource;
103 struct fw_transaction transaction;
104};
105
106struct inbound_transaction_resource {
107 struct client_resource resource;
108 struct fw_request *request;
109 void *data;
110 size_t length;
111};
112
113struct descriptor_resource {
114 struct client_resource resource;
115 struct fw_descriptor descriptor;
116 u32 data[0];
117};
118
119struct iso_resource {
120 struct client_resource resource;
121 struct client *client;
122 /* Schedule work and access todo only with client->lock held. */
123 struct delayed_work work;
124 enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
125 ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
126 int generation;
127 u64 channels;
128 s32 bandwidth;
129 struct iso_resource_event *e_alloc, *e_dealloc;
130};
131
132static void schedule_iso_resource(struct iso_resource *);
133static void release_iso_resource(struct client *, struct client_resource *);
134
48/* 135/*
49 * dequeue_event() just kfree()'s the event, so the event has to be 136 * dequeue_event() just kfree()'s the event, so the event has to be
50 * the first field in the struct. 137 * the first field in a struct XYZ_event.
51 */ 138 */
52
53struct event { 139struct event {
54 struct { void *data; size_t size; } v[2]; 140 struct { void *data; size_t size; } v[2];
55 struct list_head link; 141 struct list_head link;
56}; 142};
57 143
58struct bus_reset { 144struct bus_reset_event {
59 struct event event; 145 struct event event;
60 struct fw_cdev_event_bus_reset reset; 146 struct fw_cdev_event_bus_reset reset;
61}; 147};
62 148
63struct response { 149struct outbound_transaction_event {
64 struct event event; 150 struct event event;
65 struct fw_transaction transaction;
66 struct client *client; 151 struct client *client;
67 struct client_resource resource; 152 struct outbound_transaction_resource r;
68 struct fw_cdev_event_response response; 153 struct fw_cdev_event_response response;
69}; 154};
70 155
71struct iso_interrupt { 156struct inbound_transaction_event {
72 struct event event; 157 struct event event;
73 struct fw_cdev_event_iso_interrupt interrupt; 158 struct fw_cdev_event_request request;
74}; 159};
75 160
76struct client { 161struct iso_interrupt_event {
77 u32 version; 162 struct event event;
78 struct fw_device *device; 163 struct fw_cdev_event_iso_interrupt interrupt;
79 spinlock_t lock; 164};
80 u32 resource_handle;
81 struct list_head resource_list;
82 struct list_head event_list;
83 wait_queue_head_t wait;
84 u64 bus_reset_closure;
85
86 struct fw_iso_context *iso_context;
87 u64 iso_closure;
88 struct fw_iso_buffer buffer;
89 unsigned long vm_start;
90 165
91 struct list_head link; 166struct iso_resource_event {
167 struct event event;
168 struct fw_cdev_event_iso_resource resource;
92}; 169};
93 170
94static inline void __user * 171static inline void __user *u64_to_uptr(__u64 value)
95u64_to_uptr(__u64 value)
96{ 172{
97 return (void __user *)(unsigned long)value; 173 return (void __user *)(unsigned long)value;
98} 174}
99 175
100static inline __u64 176static inline __u64 uptr_to_u64(void __user *ptr)
101uptr_to_u64(void __user *ptr)
102{ 177{
103 return (__u64)(unsigned long)ptr; 178 return (__u64)(unsigned long)ptr;
104} 179}
@@ -107,7 +182,6 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
107{ 182{
108 struct fw_device *device; 183 struct fw_device *device;
109 struct client *client; 184 struct client *client;
110 unsigned long flags;
111 185
112 device = fw_device_get_by_devt(inode->i_rdev); 186 device = fw_device_get_by_devt(inode->i_rdev);
113 if (device == NULL) 187 if (device == NULL)
@@ -125,16 +199,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
125 } 199 }
126 200
127 client->device = device; 201 client->device = device;
128 INIT_LIST_HEAD(&client->event_list);
129 INIT_LIST_HEAD(&client->resource_list);
130 spin_lock_init(&client->lock); 202 spin_lock_init(&client->lock);
203 idr_init(&client->resource_idr);
204 INIT_LIST_HEAD(&client->event_list);
131 init_waitqueue_head(&client->wait); 205 init_waitqueue_head(&client->wait);
206 kref_init(&client->kref);
132 207
133 file->private_data = client; 208 file->private_data = client;
134 209
135 spin_lock_irqsave(&device->card->lock, flags); 210 mutex_lock(&device->client_list_mutex);
136 list_add_tail(&client->link, &device->client_list); 211 list_add_tail(&client->link, &device->client_list);
137 spin_unlock_irqrestore(&device->card->lock, flags); 212 mutex_unlock(&device->client_list_mutex);
138 213
139 return 0; 214 return 0;
140} 215}
@@ -150,68 +225,69 @@ static void queue_event(struct client *client, struct event *event,
150 event->v[1].size = size1; 225 event->v[1].size = size1;
151 226
152 spin_lock_irqsave(&client->lock, flags); 227 spin_lock_irqsave(&client->lock, flags);
153 list_add_tail(&event->link, &client->event_list); 228 if (client->in_shutdown)
229 kfree(event);
230 else
231 list_add_tail(&event->link, &client->event_list);
154 spin_unlock_irqrestore(&client->lock, flags); 232 spin_unlock_irqrestore(&client->lock, flags);
155 233
156 wake_up_interruptible(&client->wait); 234 wake_up_interruptible(&client->wait);
157} 235}
158 236
159static int 237static int dequeue_event(struct client *client,
160dequeue_event(struct client *client, char __user *buffer, size_t count) 238 char __user *buffer, size_t count)
161{ 239{
162 unsigned long flags;
163 struct event *event; 240 struct event *event;
164 size_t size, total; 241 size_t size, total;
165 int i, retval; 242 int i, ret;
166 243
167 retval = wait_event_interruptible(client->wait, 244 ret = wait_event_interruptible(client->wait,
168 !list_empty(&client->event_list) || 245 !list_empty(&client->event_list) ||
169 fw_device_is_shutdown(client->device)); 246 fw_device_is_shutdown(client->device));
170 if (retval < 0) 247 if (ret < 0)
171 return retval; 248 return ret;
172 249
173 if (list_empty(&client->event_list) && 250 if (list_empty(&client->event_list) &&
174 fw_device_is_shutdown(client->device)) 251 fw_device_is_shutdown(client->device))
175 return -ENODEV; 252 return -ENODEV;
176 253
177 spin_lock_irqsave(&client->lock, flags); 254 spin_lock_irq(&client->lock);
178 event = container_of(client->event_list.next, struct event, link); 255 event = list_first_entry(&client->event_list, struct event, link);
179 list_del(&event->link); 256 list_del(&event->link);
180 spin_unlock_irqrestore(&client->lock, flags); 257 spin_unlock_irq(&client->lock);
181 258
182 total = 0; 259 total = 0;
183 for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { 260 for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
184 size = min(event->v[i].size, count - total); 261 size = min(event->v[i].size, count - total);
185 if (copy_to_user(buffer + total, event->v[i].data, size)) { 262 if (copy_to_user(buffer + total, event->v[i].data, size)) {
186 retval = -EFAULT; 263 ret = -EFAULT;
187 goto out; 264 goto out;
188 } 265 }
189 total += size; 266 total += size;
190 } 267 }
191 retval = total; 268 ret = total;
192 269
193 out: 270 out:
194 kfree(event); 271 kfree(event);
195 272
196 return retval; 273 return ret;
197} 274}
198 275
199static ssize_t 276static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
200fw_device_op_read(struct file *file, 277 size_t count, loff_t *offset)
201 char __user *buffer, size_t count, loff_t *offset)
202{ 278{
203 struct client *client = file->private_data; 279 struct client *client = file->private_data;
204 280
205 return dequeue_event(client, buffer, count); 281 return dequeue_event(client, buffer, count);
206} 282}
207 283
208/* caller must hold card->lock so that node pointers can be dereferenced here */ 284static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
209static void 285 struct client *client)
210fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
211 struct client *client)
212{ 286{
213 struct fw_card *card = client->device->card; 287 struct fw_card *card = client->device->card;
214 288
289 spin_lock_irq(&card->lock);
290
215 event->closure = client->bus_reset_closure; 291 event->closure = client->bus_reset_closure;
216 event->type = FW_CDEV_EVENT_BUS_RESET; 292 event->type = FW_CDEV_EVENT_BUS_RESET;
217 event->generation = client->device->generation; 293 event->generation = client->device->generation;
@@ -220,39 +296,49 @@ fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
220 event->bm_node_id = 0; /* FIXME: We don't track the BM. */ 296 event->bm_node_id = 0; /* FIXME: We don't track the BM. */
221 event->irm_node_id = card->irm_node->node_id; 297 event->irm_node_id = card->irm_node->node_id;
222 event->root_node_id = card->root_node->node_id; 298 event->root_node_id = card->root_node->node_id;
299
300 spin_unlock_irq(&card->lock);
223} 301}
224 302
225static void 303static void for_each_client(struct fw_device *device,
226for_each_client(struct fw_device *device, 304 void (*callback)(struct client *client))
227 void (*callback)(struct client *client))
228{ 305{
229 struct fw_card *card = device->card;
230 struct client *c; 306 struct client *c;
231 unsigned long flags;
232
233 spin_lock_irqsave(&card->lock, flags);
234 307
308 mutex_lock(&device->client_list_mutex);
235 list_for_each_entry(c, &device->client_list, link) 309 list_for_each_entry(c, &device->client_list, link)
236 callback(c); 310 callback(c);
311 mutex_unlock(&device->client_list_mutex);
312}
313
314static int schedule_reallocations(int id, void *p, void *data)
315{
316 struct client_resource *r = p;
237 317
238 spin_unlock_irqrestore(&card->lock, flags); 318 if (r->release == release_iso_resource)
319 schedule_iso_resource(container_of(r,
320 struct iso_resource, resource));
321 return 0;
239} 322}
240 323
241static void 324static void queue_bus_reset_event(struct client *client)
242queue_bus_reset_event(struct client *client)
243{ 325{
244 struct bus_reset *bus_reset; 326 struct bus_reset_event *e;
245 327
246 bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC); 328 e = kzalloc(sizeof(*e), GFP_KERNEL);
247 if (bus_reset == NULL) { 329 if (e == NULL) {
248 fw_notify("Out of memory when allocating bus reset event\n"); 330 fw_notify("Out of memory when allocating bus reset event\n");
249 return; 331 return;
250 } 332 }
251 333
252 fill_bus_reset_event(&bus_reset->reset, client); 334 fill_bus_reset_event(&e->reset, client);
335
336 queue_event(client, &e->event,
337 &e->reset, sizeof(e->reset), NULL, 0);
253 338
254 queue_event(client, &bus_reset->event, 339 spin_lock_irq(&client->lock);
255 &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0); 340 idr_for_each(&client->resource_idr, schedule_reallocations, client);
341 spin_unlock_irq(&client->lock);
256} 342}
257 343
258void fw_device_cdev_update(struct fw_device *device) 344void fw_device_cdev_update(struct fw_device *device)
@@ -274,11 +360,11 @@ static int ioctl_get_info(struct client *client, void *buffer)
274{ 360{
275 struct fw_cdev_get_info *get_info = buffer; 361 struct fw_cdev_get_info *get_info = buffer;
276 struct fw_cdev_event_bus_reset bus_reset; 362 struct fw_cdev_event_bus_reset bus_reset;
277 struct fw_card *card = client->device->card;
278 unsigned long ret = 0; 363 unsigned long ret = 0;
279 364
280 client->version = get_info->version; 365 client->version = get_info->version;
281 get_info->version = FW_CDEV_VERSION; 366 get_info->version = FW_CDEV_VERSION;
367 get_info->card = client->device->card->index;
282 368
283 down_read(&fw_device_rwsem); 369 down_read(&fw_device_rwsem);
284 370
@@ -300,49 +386,61 @@ static int ioctl_get_info(struct client *client, void *buffer)
300 client->bus_reset_closure = get_info->bus_reset_closure; 386 client->bus_reset_closure = get_info->bus_reset_closure;
301 if (get_info->bus_reset != 0) { 387 if (get_info->bus_reset != 0) {
302 void __user *uptr = u64_to_uptr(get_info->bus_reset); 388 void __user *uptr = u64_to_uptr(get_info->bus_reset);
303 unsigned long flags;
304 389
305 spin_lock_irqsave(&card->lock, flags);
306 fill_bus_reset_event(&bus_reset, client); 390 fill_bus_reset_event(&bus_reset, client);
307 spin_unlock_irqrestore(&card->lock, flags);
308
309 if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset))) 391 if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
310 return -EFAULT; 392 return -EFAULT;
311 } 393 }
312 394
313 get_info->card = card->index;
314
315 return 0; 395 return 0;
316} 396}
317 397
318static void 398static int add_client_resource(struct client *client,
319add_client_resource(struct client *client, struct client_resource *resource) 399 struct client_resource *resource, gfp_t gfp_mask)
320{ 400{
321 unsigned long flags; 401 unsigned long flags;
402 int ret;
403
404 retry:
405 if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
406 return -ENOMEM;
322 407
323 spin_lock_irqsave(&client->lock, flags); 408 spin_lock_irqsave(&client->lock, flags);
324 list_add_tail(&resource->link, &client->resource_list); 409 if (client->in_shutdown)
325 resource->handle = client->resource_handle++; 410 ret = -ECANCELED;
411 else
412 ret = idr_get_new(&client->resource_idr, resource,
413 &resource->handle);
414 if (ret >= 0) {
415 client_get(client);
416 if (resource->release == release_iso_resource)
417 schedule_iso_resource(container_of(resource,
418 struct iso_resource, resource));
419 }
326 spin_unlock_irqrestore(&client->lock, flags); 420 spin_unlock_irqrestore(&client->lock, flags);
421
422 if (ret == -EAGAIN)
423 goto retry;
424
425 return ret < 0 ? ret : 0;
327} 426}
328 427
329static int 428static int release_client_resource(struct client *client, u32 handle,
330release_client_resource(struct client *client, u32 handle, 429 client_resource_release_fn_t release,
331 struct client_resource **resource) 430 struct client_resource **resource)
332{ 431{
333 struct client_resource *r; 432 struct client_resource *r;
334 unsigned long flags;
335 433
336 spin_lock_irqsave(&client->lock, flags); 434 spin_lock_irq(&client->lock);
337 list_for_each_entry(r, &client->resource_list, link) { 435 if (client->in_shutdown)
338 if (r->handle == handle) { 436 r = NULL;
339 list_del(&r->link); 437 else
340 break; 438 r = idr_find(&client->resource_idr, handle);
341 } 439 if (r && r->release == release)
342 } 440 idr_remove(&client->resource_idr, handle);
343 spin_unlock_irqrestore(&client->lock, flags); 441 spin_unlock_irq(&client->lock);
344 442
345 if (&r->link == &client->resource_list) 443 if (!(r && r->release == release))
346 return -EINVAL; 444 return -EINVAL;
347 445
348 if (resource) 446 if (resource)
@@ -350,203 +448,239 @@ release_client_resource(struct client *client, u32 handle,
350 else 448 else
351 r->release(client, r); 449 r->release(client, r);
352 450
451 client_put(client);
452
353 return 0; 453 return 0;
354} 454}
355 455
356static void 456static void release_transaction(struct client *client,
357release_transaction(struct client *client, struct client_resource *resource) 457 struct client_resource *resource)
358{ 458{
359 struct response *response = 459 struct outbound_transaction_resource *r = container_of(resource,
360 container_of(resource, struct response, resource); 460 struct outbound_transaction_resource, resource);
361 461
362 fw_cancel_transaction(client->device->card, &response->transaction); 462 fw_cancel_transaction(client->device->card, &r->transaction);
363} 463}
364 464
365static void 465static void complete_transaction(struct fw_card *card, int rcode,
366complete_transaction(struct fw_card *card, int rcode, 466 void *payload, size_t length, void *data)
367 void *payload, size_t length, void *data)
368{ 467{
369 struct response *response = data; 468 struct outbound_transaction_event *e = data;
370 struct client *client = response->client; 469 struct fw_cdev_event_response *rsp = &e->response;
470 struct client *client = e->client;
371 unsigned long flags; 471 unsigned long flags;
372 struct fw_cdev_event_response *r = &response->response;
373 472
374 if (length < r->length) 473 if (length < rsp->length)
375 r->length = length; 474 rsp->length = length;
376 if (rcode == RCODE_COMPLETE) 475 if (rcode == RCODE_COMPLETE)
377 memcpy(r->data, payload, r->length); 476 memcpy(rsp->data, payload, rsp->length);
378 477
379 spin_lock_irqsave(&client->lock, flags); 478 spin_lock_irqsave(&client->lock, flags);
380 list_del(&response->resource.link); 479 /*
480 * 1. If called while in shutdown, the idr tree must be left untouched.
481 * The idr handle will be removed and the client reference will be
482 * dropped later.
483 * 2. If the call chain was release_client_resource ->
484 * release_transaction -> complete_transaction (instead of a normal
485 * conclusion of the transaction), i.e. if this resource was already
486 * unregistered from the idr, the client reference will be dropped
487 * by release_client_resource and we must not drop it here.
488 */
489 if (!client->in_shutdown &&
490 idr_find(&client->resource_idr, e->r.resource.handle)) {
491 idr_remove(&client->resource_idr, e->r.resource.handle);
492 /* Drop the idr's reference */
493 client_put(client);
494 }
381 spin_unlock_irqrestore(&client->lock, flags); 495 spin_unlock_irqrestore(&client->lock, flags);
382 496
383 r->type = FW_CDEV_EVENT_RESPONSE; 497 rsp->type = FW_CDEV_EVENT_RESPONSE;
384 r->rcode = rcode; 498 rsp->rcode = rcode;
385 499
386 /* 500 /*
387 * In the case that sizeof(*r) doesn't align with the position of the 501 * In the case that sizeof(*rsp) doesn't align with the position of the
388 * data, and the read is short, preserve an extra copy of the data 502 * data, and the read is short, preserve an extra copy of the data
389 * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless 503 * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
390 * for short reads and some apps depended on it, this is both safe 504 * for short reads and some apps depended on it, this is both safe
391 * and prudent for compatibility. 505 * and prudent for compatibility.
392 */ 506 */
393 if (r->length <= sizeof(*r) - offsetof(typeof(*r), data)) 507 if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
394 queue_event(client, &response->event, r, sizeof(*r), 508 queue_event(client, &e->event, rsp, sizeof(*rsp),
395 r->data, r->length); 509 rsp->data, rsp->length);
396 else 510 else
397 queue_event(client, &response->event, r, sizeof(*r) + r->length, 511 queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
398 NULL, 0); 512 NULL, 0);
513
514 /* Drop the transaction callback's reference */
515 client_put(client);
399} 516}
400 517
401static int ioctl_send_request(struct client *client, void *buffer) 518static int init_request(struct client *client,
519 struct fw_cdev_send_request *request,
520 int destination_id, int speed)
402{ 521{
403 struct fw_device *device = client->device; 522 struct outbound_transaction_event *e;
404 struct fw_cdev_send_request *request = buffer; 523 int ret;
405 struct response *response;
406 524
407 /* What is the biggest size we'll accept, really? */ 525 if (request->tcode != TCODE_STREAM_DATA &&
408 if (request->length > 4096) 526 (request->length > 4096 || request->length > 512 << speed))
409 return -EINVAL; 527 return -EIO;
410 528
411 response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL); 529 e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
412 if (response == NULL) 530 if (e == NULL)
413 return -ENOMEM; 531 return -ENOMEM;
414 532
415 response->client = client; 533 e->client = client;
416 response->response.length = request->length; 534 e->response.length = request->length;
417 response->response.closure = request->closure; 535 e->response.closure = request->closure;
418 536
419 if (request->data && 537 if (request->data &&
420 copy_from_user(response->response.data, 538 copy_from_user(e->response.data,
421 u64_to_uptr(request->data), request->length)) { 539 u64_to_uptr(request->data), request->length)) {
422 kfree(response); 540 ret = -EFAULT;
423 return -EFAULT; 541 goto failed;
424 } 542 }
425 543
426 response->resource.release = release_transaction; 544 e->r.resource.release = release_transaction;
427 add_client_resource(client, &response->resource); 545 ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
546 if (ret < 0)
547 goto failed;
428 548
429 fw_send_request(device->card, &response->transaction, 549 /* Get a reference for the transaction callback */
430 request->tcode & 0x1f, 550 client_get(client);
431 device->node->node_id,
432 request->generation,
433 device->max_speed,
434 request->offset,
435 response->response.data, request->length,
436 complete_transaction, response);
437 551
438 if (request->data) 552 fw_send_request(client->device->card, &e->r.transaction,
439 return sizeof(request) + request->length; 553 request->tcode, destination_id, request->generation,
440 else 554 speed, request->offset, e->response.data,
441 return sizeof(request); 555 request->length, complete_transaction, e);
556 return 0;
557
558 failed:
559 kfree(e);
560
561 return ret;
442} 562}
443 563
444struct address_handler { 564static int ioctl_send_request(struct client *client, void *buffer)
445 struct fw_address_handler handler; 565{
446 __u64 closure; 566 struct fw_cdev_send_request *request = buffer;
447 struct client *client;
448 struct client_resource resource;
449};
450 567
451struct request { 568 switch (request->tcode) {
452 struct fw_request *request; 569 case TCODE_WRITE_QUADLET_REQUEST:
453 void *data; 570 case TCODE_WRITE_BLOCK_REQUEST:
454 size_t length; 571 case TCODE_READ_QUADLET_REQUEST:
455 struct client_resource resource; 572 case TCODE_READ_BLOCK_REQUEST:
456}; 573 case TCODE_LOCK_MASK_SWAP:
574 case TCODE_LOCK_COMPARE_SWAP:
575 case TCODE_LOCK_FETCH_ADD:
576 case TCODE_LOCK_LITTLE_ADD:
577 case TCODE_LOCK_BOUNDED_ADD:
578 case TCODE_LOCK_WRAP_ADD:
579 case TCODE_LOCK_VENDOR_DEPENDENT:
580 break;
581 default:
582 return -EINVAL;
583 }
457 584
458struct request_event { 585 return init_request(client, request, client->device->node_id,
459 struct event event; 586 client->device->max_speed);
460 struct fw_cdev_event_request request; 587}
461};
462 588
463static void 589static void release_request(struct client *client,
464release_request(struct client *client, struct client_resource *resource) 590 struct client_resource *resource)
465{ 591{
466 struct request *request = 592 struct inbound_transaction_resource *r = container_of(resource,
467 container_of(resource, struct request, resource); 593 struct inbound_transaction_resource, resource);
468 594
469 fw_send_response(client->device->card, request->request, 595 fw_send_response(client->device->card, r->request,
470 RCODE_CONFLICT_ERROR); 596 RCODE_CONFLICT_ERROR);
471 kfree(request); 597 kfree(r);
472} 598}
473 599
474static void 600static void handle_request(struct fw_card *card, struct fw_request *request,
475handle_request(struct fw_card *card, struct fw_request *r, 601 int tcode, int destination, int source,
476 int tcode, int destination, int source, 602 int generation, int speed,
477 int generation, int speed, 603 unsigned long long offset,
478 unsigned long long offset, 604 void *payload, size_t length, void *callback_data)
479 void *payload, size_t length, void *callback_data)
480{ 605{
481 struct address_handler *handler = callback_data; 606 struct address_handler_resource *handler = callback_data;
482 struct request *request; 607 struct inbound_transaction_resource *r;
483 struct request_event *e; 608 struct inbound_transaction_event *e;
484 struct client *client = handler->client; 609 int ret;
485 610
486 request = kmalloc(sizeof(*request), GFP_ATOMIC); 611 r = kmalloc(sizeof(*r), GFP_ATOMIC);
487 e = kmalloc(sizeof(*e), GFP_ATOMIC); 612 e = kmalloc(sizeof(*e), GFP_ATOMIC);
488 if (request == NULL || e == NULL) { 613 if (r == NULL || e == NULL)
489 kfree(request); 614 goto failed;
490 kfree(e);
491 fw_send_response(card, r, RCODE_CONFLICT_ERROR);
492 return;
493 }
494 615
495 request->request = r; 616 r->request = request;
496 request->data = payload; 617 r->data = payload;
497 request->length = length; 618 r->length = length;
498 619
499 request->resource.release = release_request; 620 r->resource.release = release_request;
500 add_client_resource(client, &request->resource); 621 ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
622 if (ret < 0)
623 goto failed;
501 624
502 e->request.type = FW_CDEV_EVENT_REQUEST; 625 e->request.type = FW_CDEV_EVENT_REQUEST;
503 e->request.tcode = tcode; 626 e->request.tcode = tcode;
504 e->request.offset = offset; 627 e->request.offset = offset;
505 e->request.length = length; 628 e->request.length = length;
506 e->request.handle = request->resource.handle; 629 e->request.handle = r->resource.handle;
507 e->request.closure = handler->closure; 630 e->request.closure = handler->closure;
508 631
509 queue_event(client, &e->event, 632 queue_event(handler->client, &e->event,
510 &e->request, sizeof(e->request), payload, length); 633 &e->request, sizeof(e->request), payload, length);
634 return;
635
636 failed:
637 kfree(r);
638 kfree(e);
639 fw_send_response(card, request, RCODE_CONFLICT_ERROR);
511} 640}
512 641
513static void 642static void release_address_handler(struct client *client,
514release_address_handler(struct client *client, 643 struct client_resource *resource)
515 struct client_resource *resource)
516{ 644{
517 struct address_handler *handler = 645 struct address_handler_resource *r =
518 container_of(resource, struct address_handler, resource); 646 container_of(resource, struct address_handler_resource, resource);
519 647
520 fw_core_remove_address_handler(&handler->handler); 648 fw_core_remove_address_handler(&r->handler);
521 kfree(handler); 649 kfree(r);
522} 650}
523 651
524static int ioctl_allocate(struct client *client, void *buffer) 652static int ioctl_allocate(struct client *client, void *buffer)
525{ 653{
526 struct fw_cdev_allocate *request = buffer; 654 struct fw_cdev_allocate *request = buffer;
527 struct address_handler *handler; 655 struct address_handler_resource *r;
528 struct fw_address_region region; 656 struct fw_address_region region;
657 int ret;
529 658
530 handler = kmalloc(sizeof(*handler), GFP_KERNEL); 659 r = kmalloc(sizeof(*r), GFP_KERNEL);
531 if (handler == NULL) 660 if (r == NULL)
532 return -ENOMEM; 661 return -ENOMEM;
533 662
534 region.start = request->offset; 663 region.start = request->offset;
535 region.end = request->offset + request->length; 664 region.end = request->offset + request->length;
536 handler->handler.length = request->length; 665 r->handler.length = request->length;
537 handler->handler.address_callback = handle_request; 666 r->handler.address_callback = handle_request;
538 handler->handler.callback_data = handler; 667 r->handler.callback_data = r;
539 handler->closure = request->closure; 668 r->closure = request->closure;
540 handler->client = client; 669 r->client = client;
541 670
542 if (fw_core_add_address_handler(&handler->handler, &region) < 0) { 671 ret = fw_core_add_address_handler(&r->handler, &region);
543 kfree(handler); 672 if (ret < 0) {
544 return -EBUSY; 673 kfree(r);
674 return ret;
545 } 675 }
546 676
547 handler->resource.release = release_address_handler; 677 r->resource.release = release_address_handler;
548 add_client_resource(client, &handler->resource); 678 ret = add_client_resource(client, &r->resource, GFP_KERNEL);
549 request->handle = handler->resource.handle; 679 if (ret < 0) {
680 release_address_handler(client, &r->resource);
681 return ret;
682 }
683 request->handle = r->resource.handle;
550 684
551 return 0; 685 return 0;
552} 686}
@@ -555,18 +689,22 @@ static int ioctl_deallocate(struct client *client, void *buffer)
555{ 689{
556 struct fw_cdev_deallocate *request = buffer; 690 struct fw_cdev_deallocate *request = buffer;
557 691
558 return release_client_resource(client, request->handle, NULL); 692 return release_client_resource(client, request->handle,
693 release_address_handler, NULL);
559} 694}
560 695
561static int ioctl_send_response(struct client *client, void *buffer) 696static int ioctl_send_response(struct client *client, void *buffer)
562{ 697{
563 struct fw_cdev_send_response *request = buffer; 698 struct fw_cdev_send_response *request = buffer;
564 struct client_resource *resource; 699 struct client_resource *resource;
565 struct request *r; 700 struct inbound_transaction_resource *r;
566 701
567 if (release_client_resource(client, request->handle, &resource) < 0) 702 if (release_client_resource(client, request->handle,
703 release_request, &resource) < 0)
568 return -EINVAL; 704 return -EINVAL;
569 r = container_of(resource, struct request, resource); 705
706 r = container_of(resource, struct inbound_transaction_resource,
707 resource);
570 if (request->length < r->length) 708 if (request->length < r->length)
571 r->length = request->length; 709 r->length = request->length;
572 if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) 710 if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
@@ -588,85 +726,92 @@ static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
588 return fw_core_initiate_bus_reset(client->device->card, short_reset); 726 return fw_core_initiate_bus_reset(client->device->card, short_reset);
589} 727}
590 728
591struct descriptor {
592 struct fw_descriptor d;
593 struct client_resource resource;
594 u32 data[0];
595};
596
597static void release_descriptor(struct client *client, 729static void release_descriptor(struct client *client,
598 struct client_resource *resource) 730 struct client_resource *resource)
599{ 731{
600 struct descriptor *descriptor = 732 struct descriptor_resource *r =
601 container_of(resource, struct descriptor, resource); 733 container_of(resource, struct descriptor_resource, resource);
602 734
603 fw_core_remove_descriptor(&descriptor->d); 735 fw_core_remove_descriptor(&r->descriptor);
604 kfree(descriptor); 736 kfree(r);
605} 737}
606 738
607static int ioctl_add_descriptor(struct client *client, void *buffer) 739static int ioctl_add_descriptor(struct client *client, void *buffer)
608{ 740{
609 struct fw_cdev_add_descriptor *request = buffer; 741 struct fw_cdev_add_descriptor *request = buffer;
610 struct descriptor *descriptor; 742 struct fw_card *card = client->device->card;
611 int retval; 743 struct descriptor_resource *r;
744 int ret;
745
746 /* Access policy: Allow this ioctl only on local nodes' device files. */
747 spin_lock_irq(&card->lock);
748 ret = client->device->node_id != card->local_node->node_id;
749 spin_unlock_irq(&card->lock);
750 if (ret)
751 return -ENOSYS;
612 752
613 if (request->length > 256) 753 if (request->length > 256)
614 return -EINVAL; 754 return -EINVAL;
615 755
616 descriptor = 756 r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
617 kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL); 757 if (r == NULL)
618 if (descriptor == NULL)
619 return -ENOMEM; 758 return -ENOMEM;
620 759
621 if (copy_from_user(descriptor->data, 760 if (copy_from_user(r->data,
622 u64_to_uptr(request->data), request->length * 4)) { 761 u64_to_uptr(request->data), request->length * 4)) {
623 kfree(descriptor); 762 ret = -EFAULT;
624 return -EFAULT; 763 goto failed;
625 } 764 }
626 765
627 descriptor->d.length = request->length; 766 r->descriptor.length = request->length;
628 descriptor->d.immediate = request->immediate; 767 r->descriptor.immediate = request->immediate;
629 descriptor->d.key = request->key; 768 r->descriptor.key = request->key;
630 descriptor->d.data = descriptor->data; 769 r->descriptor.data = r->data;
631 770
632 retval = fw_core_add_descriptor(&descriptor->d); 771 ret = fw_core_add_descriptor(&r->descriptor);
633 if (retval < 0) { 772 if (ret < 0)
634 kfree(descriptor); 773 goto failed;
635 return retval;
636 }
637 774
638 descriptor->resource.release = release_descriptor; 775 r->resource.release = release_descriptor;
639 add_client_resource(client, &descriptor->resource); 776 ret = add_client_resource(client, &r->resource, GFP_KERNEL);
640 request->handle = descriptor->resource.handle; 777 if (ret < 0) {
778 fw_core_remove_descriptor(&r->descriptor);
779 goto failed;
780 }
781 request->handle = r->resource.handle;
641 782
642 return 0; 783 return 0;
784 failed:
785 kfree(r);
786
787 return ret;
643} 788}
644 789
645static int ioctl_remove_descriptor(struct client *client, void *buffer) 790static int ioctl_remove_descriptor(struct client *client, void *buffer)
646{ 791{
647 struct fw_cdev_remove_descriptor *request = buffer; 792 struct fw_cdev_remove_descriptor *request = buffer;
648 793
649 return release_client_resource(client, request->handle, NULL); 794 return release_client_resource(client, request->handle,
795 release_descriptor, NULL);
650} 796}
651 797
652static void 798static void iso_callback(struct fw_iso_context *context, u32 cycle,
653iso_callback(struct fw_iso_context *context, u32 cycle, 799 size_t header_length, void *header, void *data)
654 size_t header_length, void *header, void *data)
655{ 800{
656 struct client *client = data; 801 struct client *client = data;
657 struct iso_interrupt *irq; 802 struct iso_interrupt_event *e;
658 803
659 irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC); 804 e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
660 if (irq == NULL) 805 if (e == NULL)
661 return; 806 return;
662 807
663 irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; 808 e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
664 irq->interrupt.closure = client->iso_closure; 809 e->interrupt.closure = client->iso_closure;
665 irq->interrupt.cycle = cycle; 810 e->interrupt.cycle = cycle;
666 irq->interrupt.header_length = header_length; 811 e->interrupt.header_length = header_length;
667 memcpy(irq->interrupt.header, header, header_length); 812 memcpy(e->interrupt.header, header, header_length);
668 queue_event(client, &irq->event, &irq->interrupt, 813 queue_event(client, &e->event, &e->interrupt,
669 sizeof(irq->interrupt) + header_length, NULL, 0); 814 sizeof(e->interrupt) + header_length, NULL, 0);
670} 815}
671 816
672static int ioctl_create_iso_context(struct client *client, void *buffer) 817static int ioctl_create_iso_context(struct client *client, void *buffer)
@@ -871,6 +1016,261 @@ static int ioctl_get_cycle_timer(struct client *client, void *buffer)
871 return 0; 1016 return 0;
872} 1017}
873 1018
1019static void iso_resource_work(struct work_struct *work)
1020{
1021 struct iso_resource_event *e;
1022 struct iso_resource *r =
1023 container_of(work, struct iso_resource, work.work);
1024 struct client *client = r->client;
1025 int generation, channel, bandwidth, todo;
1026 bool skip, free, success;
1027
1028 spin_lock_irq(&client->lock);
1029 generation = client->device->generation;
1030 todo = r->todo;
1031 /* Allow 1000ms grace period for other reallocations. */
1032 if (todo == ISO_RES_ALLOC &&
1033 time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
1034 if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
1035 client_get(client);
1036 skip = true;
1037 } else {
1038 /* We could be called twice within the same generation. */
1039 skip = todo == ISO_RES_REALLOC &&
1040 r->generation == generation;
1041 }
1042 free = todo == ISO_RES_DEALLOC ||
1043 todo == ISO_RES_ALLOC_ONCE ||
1044 todo == ISO_RES_DEALLOC_ONCE;
1045 r->generation = generation;
1046 spin_unlock_irq(&client->lock);
1047
1048 if (skip)
1049 goto out;
1050
1051 bandwidth = r->bandwidth;
1052
1053 fw_iso_resource_manage(client->device->card, generation,
1054 r->channels, &channel, &bandwidth,
1055 todo == ISO_RES_ALLOC ||
1056 todo == ISO_RES_REALLOC ||
1057 todo == ISO_RES_ALLOC_ONCE);
1058 /*
1059 * Is this generation outdated already? As long as this resource sticks
1060 * in the idr, it will be scheduled again for a newer generation or at
1061 * shutdown.
1062 */
1063 if (channel == -EAGAIN &&
1064 (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
1065 goto out;
1066
1067 success = channel >= 0 || bandwidth > 0;
1068
1069 spin_lock_irq(&client->lock);
1070 /*
1071 * Transit from allocation to reallocation, except if the client
1072 * requested deallocation in the meantime.
1073 */
1074 if (r->todo == ISO_RES_ALLOC)
1075 r->todo = ISO_RES_REALLOC;
1076 /*
1077 * Allocation or reallocation failure? Pull this resource out of the
1078 * idr and prepare for deletion, unless the client is shutting down.
1079 */
1080 if (r->todo == ISO_RES_REALLOC && !success &&
1081 !client->in_shutdown &&
1082 idr_find(&client->resource_idr, r->resource.handle)) {
1083 idr_remove(&client->resource_idr, r->resource.handle);
1084 client_put(client);
1085 free = true;
1086 }
1087 spin_unlock_irq(&client->lock);
1088
1089 if (todo == ISO_RES_ALLOC && channel >= 0)
1090 r->channels = 1ULL << channel;
1091
1092 if (todo == ISO_RES_REALLOC && success)
1093 goto out;
1094
1095 if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
1096 e = r->e_alloc;
1097 r->e_alloc = NULL;
1098 } else {
1099 e = r->e_dealloc;
1100 r->e_dealloc = NULL;
1101 }
1102 e->resource.handle = r->resource.handle;
1103 e->resource.channel = channel;
1104 e->resource.bandwidth = bandwidth;
1105
1106 queue_event(client, &e->event,
1107 &e->resource, sizeof(e->resource), NULL, 0);
1108
1109 if (free) {
1110 cancel_delayed_work(&r->work);
1111 kfree(r->e_alloc);
1112 kfree(r->e_dealloc);
1113 kfree(r);
1114 }
1115 out:
1116 client_put(client);
1117}
1118
1119static void schedule_iso_resource(struct iso_resource *r)
1120{
1121 client_get(r->client);
1122 if (!schedule_delayed_work(&r->work, 0))
1123 client_put(r->client);
1124}
1125
1126static void release_iso_resource(struct client *client,
1127 struct client_resource *resource)
1128{
1129 struct iso_resource *r =
1130 container_of(resource, struct iso_resource, resource);
1131
1132 spin_lock_irq(&client->lock);
1133 r->todo = ISO_RES_DEALLOC;
1134 schedule_iso_resource(r);
1135 spin_unlock_irq(&client->lock);
1136}
1137
1138static int init_iso_resource(struct client *client,
1139 struct fw_cdev_allocate_iso_resource *request, int todo)
1140{
1141 struct iso_resource_event *e1, *e2;
1142 struct iso_resource *r;
1143 int ret;
1144
1145 if ((request->channels == 0 && request->bandwidth == 0) ||
1146 request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
1147 request->bandwidth < 0)
1148 return -EINVAL;
1149
1150 r = kmalloc(sizeof(*r), GFP_KERNEL);
1151 e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
1152 e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
1153 if (r == NULL || e1 == NULL || e2 == NULL) {
1154 ret = -ENOMEM;
1155 goto fail;
1156 }
1157
1158 INIT_DELAYED_WORK(&r->work, iso_resource_work);
1159 r->client = client;
1160 r->todo = todo;
1161 r->generation = -1;
1162 r->channels = request->channels;
1163 r->bandwidth = request->bandwidth;
1164 r->e_alloc = e1;
1165 r->e_dealloc = e2;
1166
1167 e1->resource.closure = request->closure;
1168 e1->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
1169 e2->resource.closure = request->closure;
1170 e2->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
1171
1172 if (todo == ISO_RES_ALLOC) {
1173 r->resource.release = release_iso_resource;
1174 ret = add_client_resource(client, &r->resource, GFP_KERNEL);
1175 if (ret < 0)
1176 goto fail;
1177 } else {
1178 r->resource.release = NULL;
1179 r->resource.handle = -1;
1180 schedule_iso_resource(r);
1181 }
1182 request->handle = r->resource.handle;
1183
1184 return 0;
1185 fail:
1186 kfree(r);
1187 kfree(e1);
1188 kfree(e2);
1189
1190 return ret;
1191}
1192
1193static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
1194{
1195 struct fw_cdev_allocate_iso_resource *request = buffer;
1196
1197 return init_iso_resource(client, request, ISO_RES_ALLOC);
1198}
1199
1200static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
1201{
1202 struct fw_cdev_deallocate *request = buffer;
1203
1204 return release_client_resource(client, request->handle,
1205 release_iso_resource, NULL);
1206}
1207
1208static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
1209{
1210 struct fw_cdev_allocate_iso_resource *request = buffer;
1211
1212 return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
1213}
1214
1215static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
1216{
1217 struct fw_cdev_allocate_iso_resource *request = buffer;
1218
1219 return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
1220}
1221
1222/*
1223 * Returns a speed code: Maximum speed to or from this device,
1224 * limited by the device's link speed, the local node's link speed,
1225 * and all PHY port speeds between the two links.
1226 */
1227static int ioctl_get_speed(struct client *client, void *buffer)
1228{
1229 return client->device->max_speed;
1230}
1231
1232static int ioctl_send_broadcast_request(struct client *client, void *buffer)
1233{
1234 struct fw_cdev_send_request *request = buffer;
1235
1236 switch (request->tcode) {
1237 case TCODE_WRITE_QUADLET_REQUEST:
1238 case TCODE_WRITE_BLOCK_REQUEST:
1239 break;
1240 default:
1241 return -EINVAL;
1242 }
1243
1244 /* Security policy: Only allow accesses to Units Space. */
1245 if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
1246 return -EACCES;
1247
1248 return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
1249}
1250
1251static int ioctl_send_stream_packet(struct client *client, void *buffer)
1252{
1253 struct fw_cdev_send_stream_packet *p = buffer;
1254 struct fw_cdev_send_request request;
1255 int dest;
1256
1257 if (p->speed > client->device->card->link_speed ||
1258 p->length > 1024 << p->speed)
1259 return -EIO;
1260
1261 if (p->tag > 3 || p->channel > 63 || p->sy > 15)
1262 return -EINVAL;
1263
1264 dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
1265 request.tcode = TCODE_STREAM_DATA;
1266 request.length = p->length;
1267 request.closure = p->closure;
1268 request.data = p->data;
1269 request.generation = p->generation;
1270
1271 return init_request(client, &request, dest, p->speed);
1272}
1273
874static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { 1274static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
875 ioctl_get_info, 1275 ioctl_get_info,
876 ioctl_send_request, 1276 ioctl_send_request,
@@ -885,13 +1285,20 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
885 ioctl_start_iso, 1285 ioctl_start_iso,
886 ioctl_stop_iso, 1286 ioctl_stop_iso,
887 ioctl_get_cycle_timer, 1287 ioctl_get_cycle_timer,
1288 ioctl_allocate_iso_resource,
1289 ioctl_deallocate_iso_resource,
1290 ioctl_allocate_iso_resource_once,
1291 ioctl_deallocate_iso_resource_once,
1292 ioctl_get_speed,
1293 ioctl_send_broadcast_request,
1294 ioctl_send_stream_packet,
888}; 1295};
889 1296
890static int 1297static int dispatch_ioctl(struct client *client,
891dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) 1298 unsigned int cmd, void __user *arg)
892{ 1299{
893 char buffer[256]; 1300 char buffer[256];
894 int retval; 1301 int ret;
895 1302
896 if (_IOC_TYPE(cmd) != '#' || 1303 if (_IOC_TYPE(cmd) != '#' ||
897 _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) 1304 _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
@@ -903,9 +1310,9 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
903 return -EFAULT; 1310 return -EFAULT;
904 } 1311 }
905 1312
906 retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer); 1313 ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
907 if (retval < 0) 1314 if (ret < 0)
908 return retval; 1315 return ret;
909 1316
910 if (_IOC_DIR(cmd) & _IOC_READ) { 1317 if (_IOC_DIR(cmd) & _IOC_READ) {
911 if (_IOC_SIZE(cmd) > sizeof(buffer) || 1318 if (_IOC_SIZE(cmd) > sizeof(buffer) ||
@@ -913,12 +1320,11 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
913 return -EFAULT; 1320 return -EFAULT;
914 } 1321 }
915 1322
916 return retval; 1323 return ret;
917} 1324}
918 1325
919static long 1326static long fw_device_op_ioctl(struct file *file,
920fw_device_op_ioctl(struct file *file, 1327 unsigned int cmd, unsigned long arg)
921 unsigned int cmd, unsigned long arg)
922{ 1328{
923 struct client *client = file->private_data; 1329 struct client *client = file->private_data;
924 1330
@@ -929,9 +1335,8 @@ fw_device_op_ioctl(struct file *file,
929} 1335}
930 1336
931#ifdef CONFIG_COMPAT 1337#ifdef CONFIG_COMPAT
932static long 1338static long fw_device_op_compat_ioctl(struct file *file,
933fw_device_op_compat_ioctl(struct file *file, 1339 unsigned int cmd, unsigned long arg)
934 unsigned int cmd, unsigned long arg)
935{ 1340{
936 struct client *client = file->private_data; 1341 struct client *client = file->private_data;
937 1342
@@ -947,7 +1352,7 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
947 struct client *client = file->private_data; 1352 struct client *client = file->private_data;
948 enum dma_data_direction direction; 1353 enum dma_data_direction direction;
949 unsigned long size; 1354 unsigned long size;
950 int page_count, retval; 1355 int page_count, ret;
951 1356
952 if (fw_device_is_shutdown(client->device)) 1357 if (fw_device_is_shutdown(client->device))
953 return -ENODEV; 1358 return -ENODEV;
@@ -973,48 +1378,57 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
973 else 1378 else
974 direction = DMA_FROM_DEVICE; 1379 direction = DMA_FROM_DEVICE;
975 1380
976 retval = fw_iso_buffer_init(&client->buffer, client->device->card, 1381 ret = fw_iso_buffer_init(&client->buffer, client->device->card,
977 page_count, direction); 1382 page_count, direction);
978 if (retval < 0) 1383 if (ret < 0)
979 return retval; 1384 return ret;
980 1385
981 retval = fw_iso_buffer_map(&client->buffer, vma); 1386 ret = fw_iso_buffer_map(&client->buffer, vma);
982 if (retval < 0) 1387 if (ret < 0)
983 fw_iso_buffer_destroy(&client->buffer, client->device->card); 1388 fw_iso_buffer_destroy(&client->buffer, client->device->card);
984 1389
985 return retval; 1390 return ret;
1391}
1392
1393static int shutdown_resource(int id, void *p, void *data)
1394{
1395 struct client_resource *r = p;
1396 struct client *client = data;
1397
1398 r->release(client, r);
1399 client_put(client);
1400
1401 return 0;
986} 1402}
987 1403
988static int fw_device_op_release(struct inode *inode, struct file *file) 1404static int fw_device_op_release(struct inode *inode, struct file *file)
989{ 1405{
990 struct client *client = file->private_data; 1406 struct client *client = file->private_data;
991 struct event *e, *next_e; 1407 struct event *e, *next_e;
992 struct client_resource *r, *next_r;
993 unsigned long flags;
994 1408
995 if (client->buffer.pages) 1409 mutex_lock(&client->device->client_list_mutex);
996 fw_iso_buffer_destroy(&client->buffer, client->device->card); 1410 list_del(&client->link);
1411 mutex_unlock(&client->device->client_list_mutex);
997 1412
998 if (client->iso_context) 1413 if (client->iso_context)
999 fw_iso_context_destroy(client->iso_context); 1414 fw_iso_context_destroy(client->iso_context);
1000 1415
1001 list_for_each_entry_safe(r, next_r, &client->resource_list, link) 1416 if (client->buffer.pages)
1002 r->release(client, r); 1417 fw_iso_buffer_destroy(&client->buffer, client->device->card);
1003 1418
1004 /* 1419 /* Freeze client->resource_idr and client->event_list */
1005 * FIXME: We should wait for the async tasklets to stop 1420 spin_lock_irq(&client->lock);
1006 * running before freeing the memory. 1421 client->in_shutdown = true;
1007 */ 1422 spin_unlock_irq(&client->lock);
1423
1424 idr_for_each(&client->resource_idr, shutdown_resource, client);
1425 idr_remove_all(&client->resource_idr);
1426 idr_destroy(&client->resource_idr);
1008 1427
1009 list_for_each_entry_safe(e, next_e, &client->event_list, link) 1428 list_for_each_entry_safe(e, next_e, &client->event_list, link)
1010 kfree(e); 1429 kfree(e);
1011 1430
1012 spin_lock_irqsave(&client->device->card->lock, flags); 1431 client_put(client);
1013 list_del(&client->link);
1014 spin_unlock_irqrestore(&client->device->card->lock, flags);
1015
1016 fw_device_put(client->device);
1017 kfree(client);
1018 1432
1019 return 0; 1433 return 0;
1020} 1434}
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index bf53acb45652..a47e2129d83d 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -18,22 +18,26 @@
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */ 19 */
20 20
21#include <linux/module.h> 21#include <linux/ctype.h>
22#include <linux/wait.h>
23#include <linux/errno.h>
24#include <linux/kthread.h>
25#include <linux/device.h>
26#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/device.h>
24#include <linux/errno.h>
27#include <linux/idr.h> 25#include <linux/idr.h>
28#include <linux/jiffies.h> 26#include <linux/jiffies.h>
29#include <linux/string.h> 27#include <linux/kobject.h>
28#include <linux/list.h>
29#include <linux/mutex.h>
30#include <linux/rwsem.h> 30#include <linux/rwsem.h>
31#include <linux/semaphore.h> 31#include <linux/semaphore.h>
32#include <linux/spinlock.h>
33#include <linux/string.h>
34#include <linux/workqueue.h>
35
32#include <asm/system.h> 36#include <asm/system.h>
33#include <linux/ctype.h> 37
34#include "fw-transaction.h"
35#include "fw-topology.h"
36#include "fw-device.h" 38#include "fw-device.h"
39#include "fw-topology.h"
40#include "fw-transaction.h"
37 41
38void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p) 42void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
39{ 43{
@@ -132,8 +136,7 @@ static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
132 vendor, model, specifier_id, version); 136 vendor, model, specifier_id, version);
133} 137}
134 138
135static int 139static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
136fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
137{ 140{
138 struct fw_unit *unit = fw_unit(dev); 141 struct fw_unit *unit = fw_unit(dev);
139 char modalias[64]; 142 char modalias[64];
@@ -152,27 +155,6 @@ struct bus_type fw_bus_type = {
152}; 155};
153EXPORT_SYMBOL(fw_bus_type); 156EXPORT_SYMBOL(fw_bus_type);
154 157
155static void fw_device_release(struct device *dev)
156{
157 struct fw_device *device = fw_device(dev);
158 struct fw_card *card = device->card;
159 unsigned long flags;
160
161 /*
162 * Take the card lock so we don't set this to NULL while a
163 * FW_NODE_UPDATED callback is being handled or while the
164 * bus manager work looks at this node.
165 */
166 spin_lock_irqsave(&card->lock, flags);
167 device->node->data = NULL;
168 spin_unlock_irqrestore(&card->lock, flags);
169
170 fw_node_put(device->node);
171 kfree(device->config_rom);
172 kfree(device);
173 fw_card_put(card);
174}
175
176int fw_device_enable_phys_dma(struct fw_device *device) 158int fw_device_enable_phys_dma(struct fw_device *device)
177{ 159{
178 int generation = device->generation; 160 int generation = device->generation;
@@ -191,8 +173,8 @@ struct config_rom_attribute {
191 u32 key; 173 u32 key;
192}; 174};
193 175
194static ssize_t 176static ssize_t show_immediate(struct device *dev,
195show_immediate(struct device *dev, struct device_attribute *dattr, char *buf) 177 struct device_attribute *dattr, char *buf)
196{ 178{
197 struct config_rom_attribute *attr = 179 struct config_rom_attribute *attr =
198 container_of(dattr, struct config_rom_attribute, attr); 180 container_of(dattr, struct config_rom_attribute, attr);
@@ -223,8 +205,8 @@ show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
223#define IMMEDIATE_ATTR(name, key) \ 205#define IMMEDIATE_ATTR(name, key) \
224 { __ATTR(name, S_IRUGO, show_immediate, NULL), key } 206 { __ATTR(name, S_IRUGO, show_immediate, NULL), key }
225 207
226static ssize_t 208static ssize_t show_text_leaf(struct device *dev,
227show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf) 209 struct device_attribute *dattr, char *buf)
228{ 210{
229 struct config_rom_attribute *attr = 211 struct config_rom_attribute *attr =
230 container_of(dattr, struct config_rom_attribute, attr); 212 container_of(dattr, struct config_rom_attribute, attr);
@@ -293,10 +275,9 @@ static struct config_rom_attribute config_rom_attributes[] = {
293 TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION), 275 TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
294}; 276};
295 277
296static void 278static void init_fw_attribute_group(struct device *dev,
297init_fw_attribute_group(struct device *dev, 279 struct device_attribute *attrs,
298 struct device_attribute *attrs, 280 struct fw_attribute_group *group)
299 struct fw_attribute_group *group)
300{ 281{
301 struct device_attribute *attr; 282 struct device_attribute *attr;
302 int i, j; 283 int i, j;
@@ -319,9 +300,8 @@ init_fw_attribute_group(struct device *dev,
319 dev->groups = group->groups; 300 dev->groups = group->groups;
320} 301}
321 302
322static ssize_t 303static ssize_t modalias_show(struct device *dev,
323modalias_show(struct device *dev, 304 struct device_attribute *attr, char *buf)
324 struct device_attribute *attr, char *buf)
325{ 305{
326 struct fw_unit *unit = fw_unit(dev); 306 struct fw_unit *unit = fw_unit(dev);
327 int length; 307 int length;
@@ -332,9 +312,8 @@ modalias_show(struct device *dev,
332 return length + 1; 312 return length + 1;
333} 313}
334 314
335static ssize_t 315static ssize_t rom_index_show(struct device *dev,
336rom_index_show(struct device *dev, 316 struct device_attribute *attr, char *buf)
337 struct device_attribute *attr, char *buf)
338{ 317{
339 struct fw_device *device = fw_device(dev->parent); 318 struct fw_device *device = fw_device(dev->parent);
340 struct fw_unit *unit = fw_unit(dev); 319 struct fw_unit *unit = fw_unit(dev);
@@ -349,8 +328,8 @@ static struct device_attribute fw_unit_attributes[] = {
349 __ATTR_NULL, 328 __ATTR_NULL,
350}; 329};
351 330
352static ssize_t 331static ssize_t config_rom_show(struct device *dev,
353config_rom_show(struct device *dev, struct device_attribute *attr, char *buf) 332 struct device_attribute *attr, char *buf)
354{ 333{
355 struct fw_device *device = fw_device(dev); 334 struct fw_device *device = fw_device(dev);
356 size_t length; 335 size_t length;
@@ -363,8 +342,8 @@ config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
363 return length; 342 return length;
364} 343}
365 344
366static ssize_t 345static ssize_t guid_show(struct device *dev,
367guid_show(struct device *dev, struct device_attribute *attr, char *buf) 346 struct device_attribute *attr, char *buf)
368{ 347{
369 struct fw_device *device = fw_device(dev); 348 struct fw_device *device = fw_device(dev);
370 int ret; 349 int ret;
@@ -383,8 +362,8 @@ static struct device_attribute fw_device_attributes[] = {
383 __ATTR_NULL, 362 __ATTR_NULL,
384}; 363};
385 364
386static int 365static int read_rom(struct fw_device *device,
387read_rom(struct fw_device *device, int generation, int index, u32 *data) 366 int generation, int index, u32 *data)
388{ 367{
389 int rcode; 368 int rcode;
390 369
@@ -539,7 +518,7 @@ static int read_bus_info_block(struct fw_device *device, int generation)
539 518
540 kfree(old_rom); 519 kfree(old_rom);
541 ret = 0; 520 ret = 0;
542 device->cmc = rom[2] & 1 << 30; 521 device->cmc = rom[2] >> 30 & 1;
543 out: 522 out:
544 kfree(rom); 523 kfree(rom);
545 524
@@ -679,11 +658,53 @@ static void fw_device_shutdown(struct work_struct *work)
679 fw_device_put(device); 658 fw_device_put(device);
680} 659}
681 660
661static void fw_device_release(struct device *dev)
662{
663 struct fw_device *device = fw_device(dev);
664 struct fw_card *card = device->card;
665 unsigned long flags;
666
667 /*
668 * Take the card lock so we don't set this to NULL while a
669 * FW_NODE_UPDATED callback is being handled or while the
670 * bus manager work looks at this node.
671 */
672 spin_lock_irqsave(&card->lock, flags);
673 device->node->data = NULL;
674 spin_unlock_irqrestore(&card->lock, flags);
675
676 fw_node_put(device->node);
677 kfree(device->config_rom);
678 kfree(device);
679 fw_card_put(card);
680}
681
682static struct device_type fw_device_type = { 682static struct device_type fw_device_type = {
683 .release = fw_device_release, 683 .release = fw_device_release,
684}; 684};
685 685
686static void fw_device_update(struct work_struct *work); 686static int update_unit(struct device *dev, void *data)
687{
688 struct fw_unit *unit = fw_unit(dev);
689 struct fw_driver *driver = (struct fw_driver *)dev->driver;
690
691 if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
692 down(&dev->sem);
693 driver->update(unit);
694 up(&dev->sem);
695 }
696
697 return 0;
698}
699
700static void fw_device_update(struct work_struct *work)
701{
702 struct fw_device *device =
703 container_of(work, struct fw_device, work.work);
704
705 fw_device_cdev_update(device);
706 device_for_each_child(&device->device, NULL, update_unit);
707}
687 708
688/* 709/*
689 * If a device was pending for deletion because its node went away but its 710 * If a device was pending for deletion because its node went away but its
@@ -735,12 +756,50 @@ static int lookup_existing_device(struct device *dev, void *data)
735 return match; 756 return match;
736} 757}
737 758
759enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
760
761void fw_device_set_broadcast_channel(struct fw_device *device, int generation)
762{
763 struct fw_card *card = device->card;
764 __be32 data;
765 int rcode;
766
767 if (!card->broadcast_channel_allocated)
768 return;
769
770 if (device->bc_implemented == BC_UNKNOWN) {
771 rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
772 device->node_id, generation, device->max_speed,
773 CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
774 &data, 4);
775 switch (rcode) {
776 case RCODE_COMPLETE:
777 if (data & cpu_to_be32(1 << 31)) {
778 device->bc_implemented = BC_IMPLEMENTED;
779 break;
780 }
781 /* else fall through to case address error */
782 case RCODE_ADDRESS_ERROR:
783 device->bc_implemented = BC_UNIMPLEMENTED;
784 }
785 }
786
787 if (device->bc_implemented == BC_IMPLEMENTED) {
788 data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
789 BROADCAST_CHANNEL_VALID);
790 fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
791 device->node_id, generation, device->max_speed,
792 CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
793 &data, 4);
794 }
795}
796
738static void fw_device_init(struct work_struct *work) 797static void fw_device_init(struct work_struct *work)
739{ 798{
740 struct fw_device *device = 799 struct fw_device *device =
741 container_of(work, struct fw_device, work.work); 800 container_of(work, struct fw_device, work.work);
742 struct device *revived_dev; 801 struct device *revived_dev;
743 int minor, err; 802 int minor, ret;
744 803
745 /* 804 /*
746 * All failure paths here set node->data to NULL, so that we 805 * All failure paths here set node->data to NULL, so that we
@@ -776,12 +835,12 @@ static void fw_device_init(struct work_struct *work)
776 835
777 fw_device_get(device); 836 fw_device_get(device);
778 down_write(&fw_device_rwsem); 837 down_write(&fw_device_rwsem);
779 err = idr_pre_get(&fw_device_idr, GFP_KERNEL) ? 838 ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
780 idr_get_new(&fw_device_idr, device, &minor) : 839 idr_get_new(&fw_device_idr, device, &minor) :
781 -ENOMEM; 840 -ENOMEM;
782 up_write(&fw_device_rwsem); 841 up_write(&fw_device_rwsem);
783 842
784 if (err < 0) 843 if (ret < 0)
785 goto error; 844 goto error;
786 845
787 device->device.bus = &fw_bus_type; 846 device->device.bus = &fw_bus_type;
@@ -828,6 +887,8 @@ static void fw_device_init(struct work_struct *work)
828 device->config_rom[3], device->config_rom[4], 887 device->config_rom[3], device->config_rom[4],
829 1 << device->max_speed); 888 1 << device->max_speed);
830 device->config_rom_retries = 0; 889 device->config_rom_retries = 0;
890
891 fw_device_set_broadcast_channel(device, device->generation);
831 } 892 }
832 893
833 /* 894 /*
@@ -851,29 +912,6 @@ static void fw_device_init(struct work_struct *work)
851 put_device(&device->device); /* our reference */ 912 put_device(&device->device); /* our reference */
852} 913}
853 914
854static int update_unit(struct device *dev, void *data)
855{
856 struct fw_unit *unit = fw_unit(dev);
857 struct fw_driver *driver = (struct fw_driver *)dev->driver;
858
859 if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
860 down(&dev->sem);
861 driver->update(unit);
862 up(&dev->sem);
863 }
864
865 return 0;
866}
867
868static void fw_device_update(struct work_struct *work)
869{
870 struct fw_device *device =
871 container_of(work, struct fw_device, work.work);
872
873 fw_device_cdev_update(device);
874 device_for_each_child(&device->device, NULL, update_unit);
875}
876
877enum { 915enum {
878 REREAD_BIB_ERROR, 916 REREAD_BIB_ERROR,
879 REREAD_BIB_GONE, 917 REREAD_BIB_GONE,
@@ -894,7 +932,7 @@ static int reread_bus_info_block(struct fw_device *device, int generation)
894 if (i == 0 && q == 0) 932 if (i == 0 && q == 0)
895 return REREAD_BIB_GONE; 933 return REREAD_BIB_GONE;
896 934
897 if (i > device->config_rom_length || q != device->config_rom[i]) 935 if (q != device->config_rom[i])
898 return REREAD_BIB_CHANGED; 936 return REREAD_BIB_CHANGED;
899 } 937 }
900 938
@@ -1004,6 +1042,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
1004 device->node = fw_node_get(node); 1042 device->node = fw_node_get(node);
1005 device->node_id = node->node_id; 1043 device->node_id = node->node_id;
1006 device->generation = card->generation; 1044 device->generation = card->generation;
1045 mutex_init(&device->client_list_mutex);
1007 INIT_LIST_HEAD(&device->client_list); 1046 INIT_LIST_HEAD(&device->client_list);
1008 1047
1009 /* 1048 /*
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index 8ef6ec2ca21c..97588937c018 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -19,10 +19,17 @@
19#ifndef __fw_device_h 19#ifndef __fw_device_h
20#define __fw_device_h 20#define __fw_device_h
21 21
22#include <linux/device.h>
22#include <linux/fs.h> 23#include <linux/fs.h>
23#include <linux/cdev.h>
24#include <linux/idr.h> 24#include <linux/idr.h>
25#include <linux/kernel.h>
26#include <linux/list.h>
27#include <linux/mutex.h>
25#include <linux/rwsem.h> 28#include <linux/rwsem.h>
29#include <linux/sysfs.h>
30#include <linux/types.h>
31#include <linux/workqueue.h>
32
26#include <asm/atomic.h> 33#include <asm/atomic.h>
27 34
28enum fw_device_state { 35enum fw_device_state {
@@ -38,6 +45,9 @@ struct fw_attribute_group {
38 struct attribute *attrs[11]; 45 struct attribute *attrs[11];
39}; 46};
40 47
48struct fw_node;
49struct fw_card;
50
41/* 51/*
42 * Note, fw_device.generation always has to be read before fw_device.node_id. 52 * Note, fw_device.generation always has to be read before fw_device.node_id.
43 * Use SMP memory barriers to ensure this. Otherwise requests will be sent 53 * Use SMP memory barriers to ensure this. Otherwise requests will be sent
@@ -61,13 +71,18 @@ struct fw_device {
61 int node_id; 71 int node_id;
62 int generation; 72 int generation;
63 unsigned max_speed; 73 unsigned max_speed;
64 bool cmc;
65 struct fw_card *card; 74 struct fw_card *card;
66 struct device device; 75 struct device device;
76
77 struct mutex client_list_mutex;
67 struct list_head client_list; 78 struct list_head client_list;
79
68 u32 *config_rom; 80 u32 *config_rom;
69 size_t config_rom_length; 81 size_t config_rom_length;
70 int config_rom_retries; 82 int config_rom_retries;
83 unsigned cmc:1;
84 unsigned bc_implemented:2;
85
71 struct delayed_work work; 86 struct delayed_work work;
72 struct fw_attribute_group attribute_group; 87 struct fw_attribute_group attribute_group;
73}; 88};
@@ -96,6 +111,7 @@ static inline void fw_device_put(struct fw_device *device)
96 111
97struct fw_device *fw_device_get_by_devt(dev_t devt); 112struct fw_device *fw_device_get_by_devt(dev_t devt);
98int fw_device_enable_phys_dma(struct fw_device *device); 113int fw_device_enable_phys_dma(struct fw_device *device);
114void fw_device_set_broadcast_channel(struct fw_device *device, int generation);
99 115
100void fw_device_cdev_update(struct fw_device *device); 116void fw_device_cdev_update(struct fw_device *device);
101void fw_device_cdev_remove(struct fw_device *device); 117void fw_device_cdev_remove(struct fw_device *device);
@@ -176,8 +192,7 @@ struct fw_driver {
176 const struct fw_device_id *id_table; 192 const struct fw_device_id *id_table;
177}; 193};
178 194
179static inline struct fw_driver * 195static inline struct fw_driver *fw_driver(struct device_driver *drv)
180fw_driver(struct device_driver *drv)
181{ 196{
182 return container_of(drv, struct fw_driver, driver); 197 return container_of(drv, struct fw_driver, driver);
183} 198}
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index e14c03dc0065..2baf1007253e 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * Isochronous IO functionality 2 * Isochronous I/O functionality:
3 * - Isochronous DMA context management
4 * - Isochronous bus resource management (channels, bandwidth), client side
3 * 5 *
4 * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net> 6 * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
5 * 7 *
@@ -18,21 +20,25 @@
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 20 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */ 21 */
20 22
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/dma-mapping.h> 23#include <linux/dma-mapping.h>
24#include <linux/vmalloc.h> 24#include <linux/errno.h>
25#include <linux/firewire-constants.h>
26#include <linux/kernel.h>
25#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/spinlock.h>
29#include <linux/vmalloc.h>
26 30
27#include "fw-transaction.h"
28#include "fw-topology.h" 31#include "fw-topology.h"
29#include "fw-device.h" 32#include "fw-transaction.h"
30 33
31int 34/*
32fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, 35 * Isochronous DMA context management
33 int page_count, enum dma_data_direction direction) 36 */
37
38int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
39 int page_count, enum dma_data_direction direction)
34{ 40{
35 int i, j, retval = -ENOMEM; 41 int i, j;
36 dma_addr_t address; 42 dma_addr_t address;
37 43
38 buffer->page_count = page_count; 44 buffer->page_count = page_count;
@@ -69,19 +75,21 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
69 kfree(buffer->pages); 75 kfree(buffer->pages);
70 out: 76 out:
71 buffer->pages = NULL; 77 buffer->pages = NULL;
72 return retval; 78
79 return -ENOMEM;
73} 80}
74 81
75int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma) 82int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
76{ 83{
77 unsigned long uaddr; 84 unsigned long uaddr;
78 int i, retval; 85 int i, err;
79 86
80 uaddr = vma->vm_start; 87 uaddr = vma->vm_start;
81 for (i = 0; i < buffer->page_count; i++) { 88 for (i = 0; i < buffer->page_count; i++) {
82 retval = vm_insert_page(vma, uaddr, buffer->pages[i]); 89 err = vm_insert_page(vma, uaddr, buffer->pages[i]);
83 if (retval) 90 if (err)
84 return retval; 91 return err;
92
85 uaddr += PAGE_SIZE; 93 uaddr += PAGE_SIZE;
86 } 94 }
87 95
@@ -105,14 +113,14 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
105 buffer->pages = NULL; 113 buffer->pages = NULL;
106} 114}
107 115
108struct fw_iso_context * 116struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
109fw_iso_context_create(struct fw_card *card, int type, 117 int type, int channel, int speed, size_t header_size,
110 int channel, int speed, size_t header_size, 118 fw_iso_callback_t callback, void *callback_data)
111 fw_iso_callback_t callback, void *callback_data)
112{ 119{
113 struct fw_iso_context *ctx; 120 struct fw_iso_context *ctx;
114 121
115 ctx = card->driver->allocate_iso_context(card, type, header_size); 122 ctx = card->driver->allocate_iso_context(card,
123 type, channel, header_size);
116 if (IS_ERR(ctx)) 124 if (IS_ERR(ctx))
117 return ctx; 125 return ctx;
118 126
@@ -134,25 +142,186 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx)
134 card->driver->free_iso_context(ctx); 142 card->driver->free_iso_context(ctx);
135} 143}
136 144
137int 145int fw_iso_context_start(struct fw_iso_context *ctx,
138fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags) 146 int cycle, int sync, int tags)
139{ 147{
140 return ctx->card->driver->start_iso(ctx, cycle, sync, tags); 148 return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
141} 149}
142 150
143int 151int fw_iso_context_queue(struct fw_iso_context *ctx,
144fw_iso_context_queue(struct fw_iso_context *ctx, 152 struct fw_iso_packet *packet,
145 struct fw_iso_packet *packet, 153 struct fw_iso_buffer *buffer,
146 struct fw_iso_buffer *buffer, 154 unsigned long payload)
147 unsigned long payload)
148{ 155{
149 struct fw_card *card = ctx->card; 156 struct fw_card *card = ctx->card;
150 157
151 return card->driver->queue_iso(ctx, packet, buffer, payload); 158 return card->driver->queue_iso(ctx, packet, buffer, payload);
152} 159}
153 160
154int 161int fw_iso_context_stop(struct fw_iso_context *ctx)
155fw_iso_context_stop(struct fw_iso_context *ctx)
156{ 162{
157 return ctx->card->driver->stop_iso(ctx); 163 return ctx->card->driver->stop_iso(ctx);
158} 164}
165
166/*
167 * Isochronous bus resource management (channels, bandwidth), client side
168 */
169
170static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
171 int bandwidth, bool allocate)
172{
173 __be32 data[2];
174 int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
175
176 /*
177 * On a 1394a IRM with low contention, try < 1 is enough.
178 * On a 1394-1995 IRM, we need at least try < 2.
179 * Let's just do try < 5.
180 */
181 for (try = 0; try < 5; try++) {
182 new = allocate ? old - bandwidth : old + bandwidth;
183 if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
184 break;
185
186 data[0] = cpu_to_be32(old);
187 data[1] = cpu_to_be32(new);
188 switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
189 irm_id, generation, SCODE_100,
190 CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
191 data, sizeof(data))) {
192 case RCODE_GENERATION:
193 /* A generation change frees all bandwidth. */
194 return allocate ? -EAGAIN : bandwidth;
195
196 case RCODE_COMPLETE:
197 if (be32_to_cpup(data) == old)
198 return bandwidth;
199
200 old = be32_to_cpup(data);
201 /* Fall through. */
202 }
203 }
204
205 return -EIO;
206}
207
208static int manage_channel(struct fw_card *card, int irm_id, int generation,
209 u32 channels_mask, u64 offset, bool allocate)
210{
211 __be32 data[2], c, all, old;
212 int i, retry = 5;
213
214 old = all = allocate ? cpu_to_be32(~0) : 0;
215
216 for (i = 0; i < 32; i++) {
217 if (!(channels_mask & 1 << i))
218 continue;
219
220 c = cpu_to_be32(1 << (31 - i));
221 if ((old & c) != (all & c))
222 continue;
223
224 data[0] = old;
225 data[1] = old ^ c;
226 switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
227 irm_id, generation, SCODE_100,
228 offset, data, sizeof(data))) {
229 case RCODE_GENERATION:
230 /* A generation change frees all channels. */
231 return allocate ? -EAGAIN : i;
232
233 case RCODE_COMPLETE:
234 if (data[0] == old)
235 return i;
236
237 old = data[0];
238
239 /* Is the IRM 1394a-2000 compliant? */
240 if ((data[0] & c) == (data[1] & c))
241 continue;
242
243 /* 1394-1995 IRM, fall through to retry. */
244 default:
245 if (retry--)
246 i--;
247 }
248 }
249
250 return -EIO;
251}
252
253static void deallocate_channel(struct fw_card *card, int irm_id,
254 int generation, int channel)
255{
256 u32 mask;
257 u64 offset;
258
259 mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
260 offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
261 CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
262
263 manage_channel(card, irm_id, generation, mask, offset, false);
264}
265
266/**
267 * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
268 *
269 * In parameters: card, generation, channels_mask, bandwidth, allocate
270 * Out parameters: channel, bandwidth
271 * This function blocks (sleeps) during communication with the IRM.
272 *
273 * Allocates or deallocates at most one channel out of channels_mask.
274 * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
275 * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
276 * channel 0 and LSB for channel 63.)
277 * Allocates or deallocates as many bandwidth allocation units as specified.
278 *
279 * Returns channel < 0 if no channel was allocated or deallocated.
280 * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
281 *
282 * If generation is stale, deallocations succeed but allocations fail with
283 * channel = -EAGAIN.
284 *
285 * If channel allocation fails, no bandwidth will be allocated either.
286 * If bandwidth allocation fails, no channel will be allocated either.
287 * But deallocations of channel and bandwidth are tried independently
288 * of each other's success.
289 */
290void fw_iso_resource_manage(struct fw_card *card, int generation,
291 u64 channels_mask, int *channel, int *bandwidth,
292 bool allocate)
293{
294 u32 channels_hi = channels_mask; /* channels 31...0 */
295 u32 channels_lo = channels_mask >> 32; /* channels 63...32 */
296 int irm_id, ret, c = -EINVAL;
297
298 spin_lock_irq(&card->lock);
299 irm_id = card->irm_node->node_id;
300 spin_unlock_irq(&card->lock);
301
302 if (channels_hi)
303 c = manage_channel(card, irm_id, generation, channels_hi,
304 CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
305 if (channels_lo && c < 0) {
306 c = manage_channel(card, irm_id, generation, channels_lo,
307 CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
308 if (c >= 0)
309 c += 32;
310 }
311 *channel = c;
312
313 if (allocate && channels_mask != 0 && c < 0)
314 *bandwidth = 0;
315
316 if (*bandwidth == 0)
317 return;
318
319 ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
320 if (ret < 0)
321 *bandwidth = 0;
322
323 if (allocate && ret < 0 && c >= 0) {
324 deallocate_channel(card, irm_id, generation, c);
325 *channel = ret;
326 }
327}
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 6d19828a93a5..1180d0be0bb4 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -205,6 +205,7 @@ struct fw_ohci {
205 205
206 u32 it_context_mask; 206 u32 it_context_mask;
207 struct iso_context *it_context_list; 207 struct iso_context *it_context_list;
208 u64 ir_context_channels;
208 u32 ir_context_mask; 209 u32 ir_context_mask;
209 struct iso_context *ir_context_list; 210 struct iso_context *ir_context_list;
210}; 211};
@@ -441,9 +442,8 @@ static inline void flush_writes(const struct fw_ohci *ohci)
441 reg_read(ohci, OHCI1394_Version); 442 reg_read(ohci, OHCI1394_Version);
442} 443}
443 444
444static int 445static int ohci_update_phy_reg(struct fw_card *card, int addr,
445ohci_update_phy_reg(struct fw_card *card, int addr, 446 int clear_bits, int set_bits)
446 int clear_bits, int set_bits)
447{ 447{
448 struct fw_ohci *ohci = fw_ohci(card); 448 struct fw_ohci *ohci = fw_ohci(card);
449 u32 val, old; 449 u32 val, old;
@@ -658,8 +658,8 @@ static void ar_context_tasklet(unsigned long data)
658 } 658 }
659} 659}
660 660
661static int 661static int ar_context_init(struct ar_context *ctx,
662ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs) 662 struct fw_ohci *ohci, u32 regs)
663{ 663{
664 struct ar_buffer ab; 664 struct ar_buffer ab;
665 665
@@ -690,8 +690,7 @@ static void ar_context_run(struct ar_context *ctx)
690 flush_writes(ctx->ohci); 690 flush_writes(ctx->ohci);
691} 691}
692 692
693static struct descriptor * 693static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
694find_branch_descriptor(struct descriptor *d, int z)
695{ 694{
696 int b, key; 695 int b, key;
697 696
@@ -751,8 +750,7 @@ static void context_tasklet(unsigned long data)
751 * Allocate a new buffer and add it to the list of free buffers for this 750 * Allocate a new buffer and add it to the list of free buffers for this
752 * context. Must be called with ohci->lock held. 751 * context. Must be called with ohci->lock held.
753 */ 752 */
754static int 753static int context_add_buffer(struct context *ctx)
755context_add_buffer(struct context *ctx)
756{ 754{
757 struct descriptor_buffer *desc; 755 struct descriptor_buffer *desc;
758 dma_addr_t uninitialized_var(bus_addr); 756 dma_addr_t uninitialized_var(bus_addr);
@@ -781,9 +779,8 @@ context_add_buffer(struct context *ctx)
781 return 0; 779 return 0;
782} 780}
783 781
784static int 782static int context_init(struct context *ctx, struct fw_ohci *ohci,
785context_init(struct context *ctx, struct fw_ohci *ohci, 783 u32 regs, descriptor_callback_t callback)
786 u32 regs, descriptor_callback_t callback)
787{ 784{
788 ctx->ohci = ohci; 785 ctx->ohci = ohci;
789 ctx->regs = regs; 786 ctx->regs = regs;
@@ -814,8 +811,7 @@ context_init(struct context *ctx, struct fw_ohci *ohci,
814 return 0; 811 return 0;
815} 812}
816 813
817static void 814static void context_release(struct context *ctx)
818context_release(struct context *ctx)
819{ 815{
820 struct fw_card *card = &ctx->ohci->card; 816 struct fw_card *card = &ctx->ohci->card;
821 struct descriptor_buffer *desc, *tmp; 817 struct descriptor_buffer *desc, *tmp;
@@ -827,8 +823,8 @@ context_release(struct context *ctx)
827} 823}
828 824
829/* Must be called with ohci->lock held */ 825/* Must be called with ohci->lock held */
830static struct descriptor * 826static struct descriptor *context_get_descriptors(struct context *ctx,
831context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus) 827 int z, dma_addr_t *d_bus)
832{ 828{
833 struct descriptor *d = NULL; 829 struct descriptor *d = NULL;
834 struct descriptor_buffer *desc = ctx->buffer_tail; 830 struct descriptor_buffer *desc = ctx->buffer_tail;
@@ -912,8 +908,8 @@ struct driver_data {
912 * Must always be called with the ochi->lock held to ensure proper 908 * Must always be called with the ochi->lock held to ensure proper
913 * generation handling and locking around packet queue manipulation. 909 * generation handling and locking around packet queue manipulation.
914 */ 910 */
915static int 911static int at_context_queue_packet(struct context *ctx,
916at_context_queue_packet(struct context *ctx, struct fw_packet *packet) 912 struct fw_packet *packet)
917{ 913{
918 struct fw_ohci *ohci = ctx->ohci; 914 struct fw_ohci *ohci = ctx->ohci;
919 dma_addr_t d_bus, uninitialized_var(payload_bus); 915 dma_addr_t d_bus, uninitialized_var(payload_bus);
@@ -940,7 +936,9 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
940 */ 936 */
941 937
942 header = (__le32 *) &d[1]; 938 header = (__le32 *) &d[1];
943 if (packet->header_length > 8) { 939 switch (packet->header_length) {
940 case 16:
941 case 12:
944 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | 942 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
945 (packet->speed << 16)); 943 (packet->speed << 16));
946 header[1] = cpu_to_le32((packet->header[1] & 0xffff) | 944 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
@@ -954,12 +952,27 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
954 header[3] = (__force __le32) packet->header[3]; 952 header[3] = (__force __le32) packet->header[3];
955 953
956 d[0].req_count = cpu_to_le16(packet->header_length); 954 d[0].req_count = cpu_to_le16(packet->header_length);
957 } else { 955 break;
956
957 case 8:
958 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | 958 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
959 (packet->speed << 16)); 959 (packet->speed << 16));
960 header[1] = cpu_to_le32(packet->header[0]); 960 header[1] = cpu_to_le32(packet->header[0]);
961 header[2] = cpu_to_le32(packet->header[1]); 961 header[2] = cpu_to_le32(packet->header[1]);
962 d[0].req_count = cpu_to_le16(12); 962 d[0].req_count = cpu_to_le16(12);
963 break;
964
965 case 4:
966 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
967 (packet->speed << 16));
968 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
969 d[0].req_count = cpu_to_le16(8);
970 break;
971
972 default:
973 /* BUG(); */
974 packet->ack = RCODE_SEND_ERROR;
975 return -1;
963 } 976 }
964 977
965 driver_data = (struct driver_data *) &d[3]; 978 driver_data = (struct driver_data *) &d[3];
@@ -1095,8 +1108,8 @@ static int handle_at_packet(struct context *context,
1095#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) 1108#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
1096#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) 1109#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
1097 1110
1098static void 1111static void handle_local_rom(struct fw_ohci *ohci,
1099handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) 1112 struct fw_packet *packet, u32 csr)
1100{ 1113{
1101 struct fw_packet response; 1114 struct fw_packet response;
1102 int tcode, length, i; 1115 int tcode, length, i;
@@ -1122,8 +1135,8 @@ handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
1122 fw_core_handle_response(&ohci->card, &response); 1135 fw_core_handle_response(&ohci->card, &response);
1123} 1136}
1124 1137
1125static void 1138static void handle_local_lock(struct fw_ohci *ohci,
1126handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) 1139 struct fw_packet *packet, u32 csr)
1127{ 1140{
1128 struct fw_packet response; 1141 struct fw_packet response;
1129 int tcode, length, ext_tcode, sel; 1142 int tcode, length, ext_tcode, sel;
@@ -1164,8 +1177,7 @@ handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
1164 fw_core_handle_response(&ohci->card, &response); 1177 fw_core_handle_response(&ohci->card, &response);
1165} 1178}
1166 1179
1167static void 1180static void handle_local_request(struct context *ctx, struct fw_packet *packet)
1168handle_local_request(struct context *ctx, struct fw_packet *packet)
1169{ 1181{
1170 u64 offset; 1182 u64 offset;
1171 u32 csr; 1183 u32 csr;
@@ -1205,11 +1217,10 @@ handle_local_request(struct context *ctx, struct fw_packet *packet)
1205 } 1217 }
1206} 1218}
1207 1219
1208static void 1220static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
1209at_context_transmit(struct context *ctx, struct fw_packet *packet)
1210{ 1221{
1211 unsigned long flags; 1222 unsigned long flags;
1212 int retval; 1223 int ret;
1213 1224
1214 spin_lock_irqsave(&ctx->ohci->lock, flags); 1225 spin_lock_irqsave(&ctx->ohci->lock, flags);
1215 1226
@@ -1220,10 +1231,10 @@ at_context_transmit(struct context *ctx, struct fw_packet *packet)
1220 return; 1231 return;
1221 } 1232 }
1222 1233
1223 retval = at_context_queue_packet(ctx, packet); 1234 ret = at_context_queue_packet(ctx, packet);
1224 spin_unlock_irqrestore(&ctx->ohci->lock, flags); 1235 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1225 1236
1226 if (retval < 0) 1237 if (ret < 0)
1227 packet->callback(packet, &ctx->ohci->card, packet->ack); 1238 packet->callback(packet, &ctx->ohci->card, packet->ack);
1228 1239
1229} 1240}
@@ -1590,12 +1601,12 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
1590 return 0; 1601 return 0;
1591} 1602}
1592 1603
1593static int 1604static int ohci_set_config_rom(struct fw_card *card,
1594ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) 1605 u32 *config_rom, size_t length)
1595{ 1606{
1596 struct fw_ohci *ohci; 1607 struct fw_ohci *ohci;
1597 unsigned long flags; 1608 unsigned long flags;
1598 int retval = -EBUSY; 1609 int ret = -EBUSY;
1599 __be32 *next_config_rom; 1610 __be32 *next_config_rom;
1600 dma_addr_t uninitialized_var(next_config_rom_bus); 1611 dma_addr_t uninitialized_var(next_config_rom_bus);
1601 1612
@@ -1649,7 +1660,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
1649 1660
1650 reg_write(ohci, OHCI1394_ConfigROMmap, 1661 reg_write(ohci, OHCI1394_ConfigROMmap,
1651 ohci->next_config_rom_bus); 1662 ohci->next_config_rom_bus);
1652 retval = 0; 1663 ret = 0;
1653 } 1664 }
1654 1665
1655 spin_unlock_irqrestore(&ohci->lock, flags); 1666 spin_unlock_irqrestore(&ohci->lock, flags);
@@ -1661,13 +1672,13 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
1661 * controller could need to access it before the bus reset 1672 * controller could need to access it before the bus reset
1662 * takes effect. 1673 * takes effect.
1663 */ 1674 */
1664 if (retval == 0) 1675 if (ret == 0)
1665 fw_core_initiate_bus_reset(&ohci->card, 1); 1676 fw_core_initiate_bus_reset(&ohci->card, 1);
1666 else 1677 else
1667 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1678 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1668 next_config_rom, next_config_rom_bus); 1679 next_config_rom, next_config_rom_bus);
1669 1680
1670 return retval; 1681 return ret;
1671} 1682}
1672 1683
1673static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) 1684static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
@@ -1689,7 +1700,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1689 struct fw_ohci *ohci = fw_ohci(card); 1700 struct fw_ohci *ohci = fw_ohci(card);
1690 struct context *ctx = &ohci->at_request_ctx; 1701 struct context *ctx = &ohci->at_request_ctx;
1691 struct driver_data *driver_data = packet->driver_data; 1702 struct driver_data *driver_data = packet->driver_data;
1692 int retval = -ENOENT; 1703 int ret = -ENOENT;
1693 1704
1694 tasklet_disable(&ctx->tasklet); 1705 tasklet_disable(&ctx->tasklet);
1695 1706
@@ -1704,23 +1715,22 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1704 driver_data->packet = NULL; 1715 driver_data->packet = NULL;
1705 packet->ack = RCODE_CANCELLED; 1716 packet->ack = RCODE_CANCELLED;
1706 packet->callback(packet, &ohci->card, packet->ack); 1717 packet->callback(packet, &ohci->card, packet->ack);
1707 retval = 0; 1718 ret = 0;
1708
1709 out: 1719 out:
1710 tasklet_enable(&ctx->tasklet); 1720 tasklet_enable(&ctx->tasklet);
1711 1721
1712 return retval; 1722 return ret;
1713} 1723}
1714 1724
1715static int 1725static int ohci_enable_phys_dma(struct fw_card *card,
1716ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) 1726 int node_id, int generation)
1717{ 1727{
1718#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA 1728#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1719 return 0; 1729 return 0;
1720#else 1730#else
1721 struct fw_ohci *ohci = fw_ohci(card); 1731 struct fw_ohci *ohci = fw_ohci(card);
1722 unsigned long flags; 1732 unsigned long flags;
1723 int n, retval = 0; 1733 int n, ret = 0;
1724 1734
1725 /* 1735 /*
1726 * FIXME: Make sure this bitmask is cleared when we clear the busReset 1736 * FIXME: Make sure this bitmask is cleared when we clear the busReset
@@ -1730,7 +1740,7 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
1730 spin_lock_irqsave(&ohci->lock, flags); 1740 spin_lock_irqsave(&ohci->lock, flags);
1731 1741
1732 if (ohci->generation != generation) { 1742 if (ohci->generation != generation) {
1733 retval = -ESTALE; 1743 ret = -ESTALE;
1734 goto out; 1744 goto out;
1735 } 1745 }
1736 1746
@@ -1748,12 +1758,12 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
1748 flush_writes(ohci); 1758 flush_writes(ohci);
1749 out: 1759 out:
1750 spin_unlock_irqrestore(&ohci->lock, flags); 1760 spin_unlock_irqrestore(&ohci->lock, flags);
1751 return retval; 1761
1762 return ret;
1752#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ 1763#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
1753} 1764}
1754 1765
1755static u64 1766static u64 ohci_get_bus_time(struct fw_card *card)
1756ohci_get_bus_time(struct fw_card *card)
1757{ 1767{
1758 struct fw_ohci *ohci = fw_ohci(card); 1768 struct fw_ohci *ohci = fw_ohci(card);
1759 u32 cycle_time; 1769 u32 cycle_time;
@@ -1765,6 +1775,28 @@ ohci_get_bus_time(struct fw_card *card)
1765 return bus_time; 1775 return bus_time;
1766} 1776}
1767 1777
1778static void copy_iso_headers(struct iso_context *ctx, void *p)
1779{
1780 int i = ctx->header_length;
1781
1782 if (i + ctx->base.header_size > PAGE_SIZE)
1783 return;
1784
1785 /*
1786 * The iso header is byteswapped to little endian by
1787 * the controller, but the remaining header quadlets
1788 * are big endian. We want to present all the headers
1789 * as big endian, so we have to swap the first quadlet.
1790 */
1791 if (ctx->base.header_size > 0)
1792 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1793 if (ctx->base.header_size > 4)
1794 *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
1795 if (ctx->base.header_size > 8)
1796 memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
1797 ctx->header_length += ctx->base.header_size;
1798}
1799
1768static int handle_ir_dualbuffer_packet(struct context *context, 1800static int handle_ir_dualbuffer_packet(struct context *context,
1769 struct descriptor *d, 1801 struct descriptor *d,
1770 struct descriptor *last) 1802 struct descriptor *last)
@@ -1775,7 +1807,6 @@ static int handle_ir_dualbuffer_packet(struct context *context,
1775 __le32 *ir_header; 1807 __le32 *ir_header;
1776 size_t header_length; 1808 size_t header_length;
1777 void *p, *end; 1809 void *p, *end;
1778 int i;
1779 1810
1780 if (db->first_res_count != 0 && db->second_res_count != 0) { 1811 if (db->first_res_count != 0 && db->second_res_count != 0) {
1781 if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) { 1812 if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
@@ -1788,25 +1819,14 @@ static int handle_ir_dualbuffer_packet(struct context *context,
1788 header_length = le16_to_cpu(db->first_req_count) - 1819 header_length = le16_to_cpu(db->first_req_count) -
1789 le16_to_cpu(db->first_res_count); 1820 le16_to_cpu(db->first_res_count);
1790 1821
1791 i = ctx->header_length;
1792 p = db + 1; 1822 p = db + 1;
1793 end = p + header_length; 1823 end = p + header_length;
1794 while (p < end && i + ctx->base.header_size <= PAGE_SIZE) { 1824 while (p < end) {
1795 /* 1825 copy_iso_headers(ctx, p);
1796 * The iso header is byteswapped to little endian by
1797 * the controller, but the remaining header quadlets
1798 * are big endian. We want to present all the headers
1799 * as big endian, so we have to swap the first
1800 * quadlet.
1801 */
1802 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1803 memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
1804 i += ctx->base.header_size;
1805 ctx->excess_bytes += 1826 ctx->excess_bytes +=
1806 (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff; 1827 (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
1807 p += ctx->base.header_size + 4; 1828 p += max(ctx->base.header_size, (size_t)8);
1808 } 1829 }
1809 ctx->header_length = i;
1810 1830
1811 ctx->excess_bytes -= le16_to_cpu(db->second_req_count) - 1831 ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
1812 le16_to_cpu(db->second_res_count); 1832 le16_to_cpu(db->second_res_count);
@@ -1832,7 +1852,6 @@ static int handle_ir_packet_per_buffer(struct context *context,
1832 struct descriptor *pd; 1852 struct descriptor *pd;
1833 __le32 *ir_header; 1853 __le32 *ir_header;
1834 void *p; 1854 void *p;
1835 int i;
1836 1855
1837 for (pd = d; pd <= last; pd++) { 1856 for (pd = d; pd <= last; pd++) {
1838 if (pd->transfer_status) 1857 if (pd->transfer_status)
@@ -1842,21 +1861,8 @@ static int handle_ir_packet_per_buffer(struct context *context,
1842 /* Descriptor(s) not done yet, stop iteration */ 1861 /* Descriptor(s) not done yet, stop iteration */
1843 return 0; 1862 return 0;
1844 1863
1845 i = ctx->header_length; 1864 p = last + 1;
1846 p = last + 1; 1865 copy_iso_headers(ctx, p);
1847
1848 if (ctx->base.header_size > 0 &&
1849 i + ctx->base.header_size <= PAGE_SIZE) {
1850 /*
1851 * The iso header is byteswapped to little endian by
1852 * the controller, but the remaining header quadlets
1853 * are big endian. We want to present all the headers
1854 * as big endian, so we have to swap the first quadlet.
1855 */
1856 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1857 memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
1858 ctx->header_length += ctx->base.header_size;
1859 }
1860 1866
1861 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { 1867 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
1862 ir_header = (__le32 *) p; 1868 ir_header = (__le32 *) p;
@@ -1888,21 +1894,24 @@ static int handle_it_packet(struct context *context,
1888 return 1; 1894 return 1;
1889} 1895}
1890 1896
1891static struct fw_iso_context * 1897static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
1892ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) 1898 int type, int channel, size_t header_size)
1893{ 1899{
1894 struct fw_ohci *ohci = fw_ohci(card); 1900 struct fw_ohci *ohci = fw_ohci(card);
1895 struct iso_context *ctx, *list; 1901 struct iso_context *ctx, *list;
1896 descriptor_callback_t callback; 1902 descriptor_callback_t callback;
1903 u64 *channels, dont_care = ~0ULL;
1897 u32 *mask, regs; 1904 u32 *mask, regs;
1898 unsigned long flags; 1905 unsigned long flags;
1899 int index, retval = -ENOMEM; 1906 int index, ret = -ENOMEM;
1900 1907
1901 if (type == FW_ISO_CONTEXT_TRANSMIT) { 1908 if (type == FW_ISO_CONTEXT_TRANSMIT) {
1909 channels = &dont_care;
1902 mask = &ohci->it_context_mask; 1910 mask = &ohci->it_context_mask;
1903 list = ohci->it_context_list; 1911 list = ohci->it_context_list;
1904 callback = handle_it_packet; 1912 callback = handle_it_packet;
1905 } else { 1913 } else {
1914 channels = &ohci->ir_context_channels;
1906 mask = &ohci->ir_context_mask; 1915 mask = &ohci->ir_context_mask;
1907 list = ohci->ir_context_list; 1916 list = ohci->ir_context_list;
1908 if (ohci->use_dualbuffer) 1917 if (ohci->use_dualbuffer)
@@ -1912,9 +1921,11 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
1912 } 1921 }
1913 1922
1914 spin_lock_irqsave(&ohci->lock, flags); 1923 spin_lock_irqsave(&ohci->lock, flags);
1915 index = ffs(*mask) - 1; 1924 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
1916 if (index >= 0) 1925 if (index >= 0) {
1926 *channels &= ~(1ULL << channel);
1917 *mask &= ~(1 << index); 1927 *mask &= ~(1 << index);
1928 }
1918 spin_unlock_irqrestore(&ohci->lock, flags); 1929 spin_unlock_irqrestore(&ohci->lock, flags);
1919 1930
1920 if (index < 0) 1931 if (index < 0)
@@ -1932,8 +1943,8 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
1932 if (ctx->header == NULL) 1943 if (ctx->header == NULL)
1933 goto out; 1944 goto out;
1934 1945
1935 retval = context_init(&ctx->context, ohci, regs, callback); 1946 ret = context_init(&ctx->context, ohci, regs, callback);
1936 if (retval < 0) 1947 if (ret < 0)
1937 goto out_with_header; 1948 goto out_with_header;
1938 1949
1939 return &ctx->base; 1950 return &ctx->base;
@@ -1945,7 +1956,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
1945 *mask |= 1 << index; 1956 *mask |= 1 << index;
1946 spin_unlock_irqrestore(&ohci->lock, flags); 1957 spin_unlock_irqrestore(&ohci->lock, flags);
1947 1958
1948 return ERR_PTR(retval); 1959 return ERR_PTR(ret);
1949} 1960}
1950 1961
1951static int ohci_start_iso(struct fw_iso_context *base, 1962static int ohci_start_iso(struct fw_iso_context *base,
@@ -2024,16 +2035,16 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
2024 } else { 2035 } else {
2025 index = ctx - ohci->ir_context_list; 2036 index = ctx - ohci->ir_context_list;
2026 ohci->ir_context_mask |= 1 << index; 2037 ohci->ir_context_mask |= 1 << index;
2038 ohci->ir_context_channels |= 1ULL << base->channel;
2027 } 2039 }
2028 2040
2029 spin_unlock_irqrestore(&ohci->lock, flags); 2041 spin_unlock_irqrestore(&ohci->lock, flags);
2030} 2042}
2031 2043
2032static int 2044static int ohci_queue_iso_transmit(struct fw_iso_context *base,
2033ohci_queue_iso_transmit(struct fw_iso_context *base, 2045 struct fw_iso_packet *packet,
2034 struct fw_iso_packet *packet, 2046 struct fw_iso_buffer *buffer,
2035 struct fw_iso_buffer *buffer, 2047 unsigned long payload)
2036 unsigned long payload)
2037{ 2048{
2038 struct iso_context *ctx = container_of(base, struct iso_context, base); 2049 struct iso_context *ctx = container_of(base, struct iso_context, base);
2039 struct descriptor *d, *last, *pd; 2050 struct descriptor *d, *last, *pd;
@@ -2128,11 +2139,10 @@ ohci_queue_iso_transmit(struct fw_iso_context *base,
2128 return 0; 2139 return 0;
2129} 2140}
2130 2141
2131static int 2142static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2132ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, 2143 struct fw_iso_packet *packet,
2133 struct fw_iso_packet *packet, 2144 struct fw_iso_buffer *buffer,
2134 struct fw_iso_buffer *buffer, 2145 unsigned long payload)
2135 unsigned long payload)
2136{ 2146{
2137 struct iso_context *ctx = container_of(base, struct iso_context, base); 2147 struct iso_context *ctx = container_of(base, struct iso_context, base);
2138 struct db_descriptor *db = NULL; 2148 struct db_descriptor *db = NULL;
@@ -2151,11 +2161,11 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2151 z = 2; 2161 z = 2;
2152 2162
2153 /* 2163 /*
2154 * The OHCI controller puts the status word in the header 2164 * The OHCI controller puts the isochronous header and trailer in the
2155 * buffer too, so we need 4 extra bytes per packet. 2165 * buffer, so we need at least 8 bytes.
2156 */ 2166 */
2157 packet_count = p->header_length / ctx->base.header_size; 2167 packet_count = p->header_length / ctx->base.header_size;
2158 header_size = packet_count * (ctx->base.header_size + 4); 2168 header_size = packet_count * max(ctx->base.header_size, (size_t)8);
2159 2169
2160 /* Get header size in number of descriptors. */ 2170 /* Get header size in number of descriptors. */
2161 header_z = DIV_ROUND_UP(header_size, sizeof(*d)); 2171 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
@@ -2173,7 +2183,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2173 db = (struct db_descriptor *) d; 2183 db = (struct db_descriptor *) d;
2174 db->control = cpu_to_le16(DESCRIPTOR_STATUS | 2184 db->control = cpu_to_le16(DESCRIPTOR_STATUS |
2175 DESCRIPTOR_BRANCH_ALWAYS); 2185 DESCRIPTOR_BRANCH_ALWAYS);
2176 db->first_size = cpu_to_le16(ctx->base.header_size + 4); 2186 db->first_size =
2187 cpu_to_le16(max(ctx->base.header_size, (size_t)8));
2177 if (p->skip && rest == p->payload_length) { 2188 if (p->skip && rest == p->payload_length) {
2178 db->control |= cpu_to_le16(DESCRIPTOR_WAIT); 2189 db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
2179 db->first_req_count = db->first_size; 2190 db->first_req_count = db->first_size;
@@ -2208,11 +2219,10 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2208 return 0; 2219 return 0;
2209} 2220}
2210 2221
2211static int 2222static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
2212ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, 2223 struct fw_iso_packet *packet,
2213 struct fw_iso_packet *packet, 2224 struct fw_iso_buffer *buffer,
2214 struct fw_iso_buffer *buffer, 2225 unsigned long payload)
2215 unsigned long payload)
2216{ 2226{
2217 struct iso_context *ctx = container_of(base, struct iso_context, base); 2227 struct iso_context *ctx = container_of(base, struct iso_context, base);
2218 struct descriptor *d = NULL, *pd = NULL; 2228 struct descriptor *d = NULL, *pd = NULL;
@@ -2223,11 +2233,11 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
2223 int page, offset, packet_count, header_size, payload_per_buffer; 2233 int page, offset, packet_count, header_size, payload_per_buffer;
2224 2234
2225 /* 2235 /*
2226 * The OHCI controller puts the status word in the 2236 * The OHCI controller puts the isochronous header and trailer in the
2227 * buffer too, so we need 4 extra bytes per packet. 2237 * buffer, so we need at least 8 bytes.
2228 */ 2238 */
2229 packet_count = p->header_length / ctx->base.header_size; 2239 packet_count = p->header_length / ctx->base.header_size;
2230 header_size = ctx->base.header_size + 4; 2240 header_size = max(ctx->base.header_size, (size_t)8);
2231 2241
2232 /* Get header size in number of descriptors. */ 2242 /* Get header size in number of descriptors. */
2233 header_z = DIV_ROUND_UP(header_size, sizeof(*d)); 2243 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
@@ -2286,29 +2296,27 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
2286 return 0; 2296 return 0;
2287} 2297}
2288 2298
2289static int 2299static int ohci_queue_iso(struct fw_iso_context *base,
2290ohci_queue_iso(struct fw_iso_context *base, 2300 struct fw_iso_packet *packet,
2291 struct fw_iso_packet *packet, 2301 struct fw_iso_buffer *buffer,
2292 struct fw_iso_buffer *buffer, 2302 unsigned long payload)
2293 unsigned long payload)
2294{ 2303{
2295 struct iso_context *ctx = container_of(base, struct iso_context, base); 2304 struct iso_context *ctx = container_of(base, struct iso_context, base);
2296 unsigned long flags; 2305 unsigned long flags;
2297 int retval; 2306 int ret;
2298 2307
2299 spin_lock_irqsave(&ctx->context.ohci->lock, flags); 2308 spin_lock_irqsave(&ctx->context.ohci->lock, flags);
2300 if (base->type == FW_ISO_CONTEXT_TRANSMIT) 2309 if (base->type == FW_ISO_CONTEXT_TRANSMIT)
2301 retval = ohci_queue_iso_transmit(base, packet, buffer, payload); 2310 ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
2302 else if (ctx->context.ohci->use_dualbuffer) 2311 else if (ctx->context.ohci->use_dualbuffer)
2303 retval = ohci_queue_iso_receive_dualbuffer(base, packet, 2312 ret = ohci_queue_iso_receive_dualbuffer(base, packet,
2304 buffer, payload); 2313 buffer, payload);
2305 else 2314 else
2306 retval = ohci_queue_iso_receive_packet_per_buffer(base, packet, 2315 ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
2307 buffer, 2316 buffer, payload);
2308 payload);
2309 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); 2317 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
2310 2318
2311 return retval; 2319 return ret;
2312} 2320}
2313 2321
2314static const struct fw_card_driver ohci_driver = { 2322static const struct fw_card_driver ohci_driver = {
@@ -2357,8 +2365,8 @@ static void ohci_pmac_off(struct pci_dev *dev)
2357#define ohci_pmac_off(dev) 2365#define ohci_pmac_off(dev)
2358#endif /* CONFIG_PPC_PMAC */ 2366#endif /* CONFIG_PPC_PMAC */
2359 2367
2360static int __devinit 2368static int __devinit pci_probe(struct pci_dev *dev,
2361pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) 2369 const struct pci_device_id *ent)
2362{ 2370{
2363 struct fw_ohci *ohci; 2371 struct fw_ohci *ohci;
2364 u32 bus_options, max_receive, link_speed, version; 2372 u32 bus_options, max_receive, link_speed, version;
@@ -2440,6 +2448,7 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2440 ohci->it_context_list = kzalloc(size, GFP_KERNEL); 2448 ohci->it_context_list = kzalloc(size, GFP_KERNEL);
2441 2449
2442 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); 2450 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
2451 ohci->ir_context_channels = ~0ULL;
2443 ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); 2452 ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
2444 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); 2453 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
2445 size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask); 2454 size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
@@ -2467,11 +2476,12 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2467 reg_read(ohci, OHCI1394_GUIDLo); 2476 reg_read(ohci, OHCI1394_GUIDLo);
2468 2477
2469 err = fw_card_add(&ohci->card, max_receive, link_speed, guid); 2478 err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
2470 if (err < 0) 2479 if (err)
2471 goto fail_self_id; 2480 goto fail_self_id;
2472 2481
2473 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n", 2482 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
2474 dev_name(&dev->dev), version >> 16, version & 0xff); 2483 dev_name(&dev->dev), version >> 16, version & 0xff);
2484
2475 return 0; 2485 return 0;
2476 2486
2477 fail_self_id: 2487 fail_self_id:
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index c71c4419d9e8..2bcf51557c72 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -392,20 +392,18 @@ static const struct {
392 } 392 }
393}; 393};
394 394
395static void 395static void free_orb(struct kref *kref)
396free_orb(struct kref *kref)
397{ 396{
398 struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref); 397 struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref);
399 398
400 kfree(orb); 399 kfree(orb);
401} 400}
402 401
403static void 402static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
404sbp2_status_write(struct fw_card *card, struct fw_request *request, 403 int tcode, int destination, int source,
405 int tcode, int destination, int source, 404 int generation, int speed,
406 int generation, int speed, 405 unsigned long long offset,
407 unsigned long long offset, 406 void *payload, size_t length, void *callback_data)
408 void *payload, size_t length, void *callback_data)
409{ 407{
410 struct sbp2_logical_unit *lu = callback_data; 408 struct sbp2_logical_unit *lu = callback_data;
411 struct sbp2_orb *orb; 409 struct sbp2_orb *orb;
@@ -451,9 +449,8 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request,
451 fw_send_response(card, request, RCODE_COMPLETE); 449 fw_send_response(card, request, RCODE_COMPLETE);
452} 450}
453 451
454static void 452static void complete_transaction(struct fw_card *card, int rcode,
455complete_transaction(struct fw_card *card, int rcode, 453 void *payload, size_t length, void *data)
456 void *payload, size_t length, void *data)
457{ 454{
458 struct sbp2_orb *orb = data; 455 struct sbp2_orb *orb = data;
459 unsigned long flags; 456 unsigned long flags;
@@ -482,9 +479,8 @@ complete_transaction(struct fw_card *card, int rcode,
482 kref_put(&orb->kref, free_orb); 479 kref_put(&orb->kref, free_orb);
483} 480}
484 481
485static void 482static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
486sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, 483 int node_id, int generation, u64 offset)
487 int node_id, int generation, u64 offset)
488{ 484{
489 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 485 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
490 unsigned long flags; 486 unsigned long flags;
@@ -531,8 +527,8 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
531 return retval; 527 return retval;
532} 528}
533 529
534static void 530static void complete_management_orb(struct sbp2_orb *base_orb,
535complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) 531 struct sbp2_status *status)
536{ 532{
537 struct sbp2_management_orb *orb = 533 struct sbp2_management_orb *orb =
538 container_of(base_orb, struct sbp2_management_orb, base); 534 container_of(base_orb, struct sbp2_management_orb, base);
@@ -542,10 +538,9 @@ complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
542 complete(&orb->done); 538 complete(&orb->done);
543} 539}
544 540
545static int 541static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
546sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, 542 int generation, int function,
547 int generation, int function, int lun_or_login_id, 543 int lun_or_login_id, void *response)
548 void *response)
549{ 544{
550 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 545 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
551 struct sbp2_management_orb *orb; 546 struct sbp2_management_orb *orb;
@@ -652,9 +647,8 @@ static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
652 &d, sizeof(d)); 647 &d, sizeof(d));
653} 648}
654 649
655static void 650static void complete_agent_reset_write_no_wait(struct fw_card *card,
656complete_agent_reset_write_no_wait(struct fw_card *card, int rcode, 651 int rcode, void *payload, size_t length, void *data)
657 void *payload, size_t length, void *data)
658{ 652{
659 kfree(data); 653 kfree(data);
660} 654}
@@ -1299,8 +1293,7 @@ static void sbp2_unmap_scatterlist(struct device *card_device,
1299 sizeof(orb->page_table), DMA_TO_DEVICE); 1293 sizeof(orb->page_table), DMA_TO_DEVICE);
1300} 1294}
1301 1295
1302static unsigned int 1296static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
1303sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
1304{ 1297{
1305 int sam_status; 1298 int sam_status;
1306 1299
@@ -1337,8 +1330,8 @@ sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
1337 } 1330 }
1338} 1331}
1339 1332
1340static void 1333static void complete_command_orb(struct sbp2_orb *base_orb,
1341complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) 1334 struct sbp2_status *status)
1342{ 1335{
1343 struct sbp2_command_orb *orb = 1336 struct sbp2_command_orb *orb =
1344 container_of(base_orb, struct sbp2_command_orb, base); 1337 container_of(base_orb, struct sbp2_command_orb, base);
@@ -1384,9 +1377,8 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
1384 orb->done(orb->cmd); 1377 orb->done(orb->cmd);
1385} 1378}
1386 1379
1387static int 1380static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
1388sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device, 1381 struct fw_device *device, struct sbp2_logical_unit *lu)
1389 struct sbp2_logical_unit *lu)
1390{ 1382{
1391 struct scatterlist *sg = scsi_sglist(orb->cmd); 1383 struct scatterlist *sg = scsi_sglist(orb->cmd);
1392 int i, n; 1384 int i, n;
@@ -1584,9 +1576,8 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
1584 * This is the concatenation of target port identifier and logical unit 1576 * This is the concatenation of target port identifier and logical unit
1585 * identifier as per SAM-2...SAM-4 annex A. 1577 * identifier as per SAM-2...SAM-4 annex A.
1586 */ 1578 */
1587static ssize_t 1579static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
1588sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr, 1580 struct device_attribute *attr, char *buf)
1589 char *buf)
1590{ 1581{
1591 struct scsi_device *sdev = to_scsi_device(dev); 1582 struct scsi_device *sdev = to_scsi_device(dev);
1592 struct sbp2_logical_unit *lu; 1583 struct sbp2_logical_unit *lu;
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index 8dd6703b55cd..d0deecc4de93 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -314,9 +314,8 @@ typedef void (*fw_node_callback_t)(struct fw_card * card,
314 struct fw_node * node, 314 struct fw_node * node,
315 struct fw_node * parent); 315 struct fw_node * parent);
316 316
317static void 317static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
318for_each_fw_node(struct fw_card *card, struct fw_node *root, 318 fw_node_callback_t callback)
319 fw_node_callback_t callback)
320{ 319{
321 struct list_head list; 320 struct list_head list;
322 struct fw_node *node, *next, *child, *parent; 321 struct fw_node *node, *next, *child, *parent;
@@ -349,9 +348,8 @@ for_each_fw_node(struct fw_card *card, struct fw_node *root,
349 fw_node_put(node); 348 fw_node_put(node);
350} 349}
351 350
352static void 351static void report_lost_node(struct fw_card *card,
353report_lost_node(struct fw_card *card, 352 struct fw_node *node, struct fw_node *parent)
354 struct fw_node *node, struct fw_node *parent)
355{ 353{
356 fw_node_event(card, node, FW_NODE_DESTROYED); 354 fw_node_event(card, node, FW_NODE_DESTROYED);
357 fw_node_put(node); 355 fw_node_put(node);
@@ -360,9 +358,8 @@ report_lost_node(struct fw_card *card,
360 card->bm_retries = 0; 358 card->bm_retries = 0;
361} 359}
362 360
363static void 361static void report_found_node(struct fw_card *card,
364report_found_node(struct fw_card *card, 362 struct fw_node *node, struct fw_node *parent)
365 struct fw_node *node, struct fw_node *parent)
366{ 363{
367 int b_path = (node->phy_speed == SCODE_BETA); 364 int b_path = (node->phy_speed == SCODE_BETA);
368 365
@@ -415,8 +412,7 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
415 * found, lost or updated. Update the nodes in the card topology tree 412 * found, lost or updated. Update the nodes in the card topology tree
416 * as we go. 413 * as we go.
417 */ 414 */
418static void 415static void update_tree(struct fw_card *card, struct fw_node *root)
419update_tree(struct fw_card *card, struct fw_node *root)
420{ 416{
421 struct list_head list0, list1; 417 struct list_head list0, list1;
422 struct fw_node *node0, *node1, *next1; 418 struct fw_node *node0, *node1, *next1;
@@ -497,8 +493,8 @@ update_tree(struct fw_card *card, struct fw_node *root)
497 } 493 }
498} 494}
499 495
500static void 496static void update_topology_map(struct fw_card *card,
501update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count) 497 u32 *self_ids, int self_id_count)
502{ 498{
503 int node_count; 499 int node_count;
504 500
@@ -510,10 +506,8 @@ update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count)
510 fw_compute_block_crc(card->topology_map); 506 fw_compute_block_crc(card->topology_map);
511} 507}
512 508
513void 509void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
514fw_core_handle_bus_reset(struct fw_card *card, 510 int self_id_count, u32 *self_ids)
515 int node_id, int generation,
516 int self_id_count, u32 * self_ids)
517{ 511{
518 struct fw_node *local_node; 512 struct fw_node *local_node;
519 unsigned long flags; 513 unsigned long flags;
@@ -532,6 +526,7 @@ fw_core_handle_bus_reset(struct fw_card *card,
532 526
533 spin_lock_irqsave(&card->lock, flags); 527 spin_lock_irqsave(&card->lock, flags);
534 528
529 card->broadcast_channel_allocated = false;
535 card->node_id = node_id; 530 card->node_id = node_id;
536 /* 531 /*
537 * Update node_id before generation to prevent anybody from using 532 * Update node_id before generation to prevent anybody from using
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
index addb9f8ea776..3c497bb4fae4 100644
--- a/drivers/firewire/fw-topology.h
+++ b/drivers/firewire/fw-topology.h
@@ -19,6 +19,11 @@
19#ifndef __fw_topology_h 19#ifndef __fw_topology_h
20#define __fw_topology_h 20#define __fw_topology_h
21 21
22#include <linux/list.h>
23#include <linux/slab.h>
24
25#include <asm/atomic.h>
26
22enum { 27enum {
23 FW_NODE_CREATED, 28 FW_NODE_CREATED,
24 FW_NODE_UPDATED, 29 FW_NODE_UPDATED,
@@ -51,26 +56,22 @@ struct fw_node {
51 struct fw_node *ports[0]; 56 struct fw_node *ports[0];
52}; 57};
53 58
54static inline struct fw_node * 59static inline struct fw_node *fw_node_get(struct fw_node *node)
55fw_node_get(struct fw_node *node)
56{ 60{
57 atomic_inc(&node->ref_count); 61 atomic_inc(&node->ref_count);
58 62
59 return node; 63 return node;
60} 64}
61 65
62static inline void 66static inline void fw_node_put(struct fw_node *node)
63fw_node_put(struct fw_node *node)
64{ 67{
65 if (atomic_dec_and_test(&node->ref_count)) 68 if (atomic_dec_and_test(&node->ref_count))
66 kfree(node); 69 kfree(node);
67} 70}
68 71
69void 72struct fw_card;
70fw_destroy_nodes(struct fw_card *card); 73void fw_destroy_nodes(struct fw_card *card);
71
72int
73fw_compute_block_crc(u32 *block);
74 74
75int fw_compute_block_crc(u32 *block);
75 76
76#endif /* __fw_topology_h */ 77#endif /* __fw_topology_h */
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 699ac041f39a..283dac6d327d 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -64,10 +64,8 @@
64#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) 64#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
65#define PHY_IDENTIFIER(id) ((id) << 30) 65#define PHY_IDENTIFIER(id) ((id) << 30)
66 66
67static int 67static int close_transaction(struct fw_transaction *transaction,
68close_transaction(struct fw_transaction *transaction, 68 struct fw_card *card, int rcode)
69 struct fw_card *card, int rcode,
70 u32 *payload, size_t length)
71{ 69{
72 struct fw_transaction *t; 70 struct fw_transaction *t;
73 unsigned long flags; 71 unsigned long flags;
@@ -83,7 +81,7 @@ close_transaction(struct fw_transaction *transaction,
83 spin_unlock_irqrestore(&card->lock, flags); 81 spin_unlock_irqrestore(&card->lock, flags);
84 82
85 if (&t->link != &card->transaction_list) { 83 if (&t->link != &card->transaction_list) {
86 t->callback(card, rcode, payload, length, t->callback_data); 84 t->callback(card, rcode, NULL, 0, t->callback_data);
87 return 0; 85 return 0;
88 } 86 }
89 87
@@ -94,9 +92,8 @@ close_transaction(struct fw_transaction *transaction,
94 * Only valid for transactions that are potentially pending (ie have 92 * Only valid for transactions that are potentially pending (ie have
95 * been sent). 93 * been sent).
96 */ 94 */
97int 95int fw_cancel_transaction(struct fw_card *card,
98fw_cancel_transaction(struct fw_card *card, 96 struct fw_transaction *transaction)
99 struct fw_transaction *transaction)
100{ 97{
101 /* 98 /*
102 * Cancel the packet transmission if it's still queued. That 99 * Cancel the packet transmission if it's still queued. That
@@ -112,20 +109,19 @@ fw_cancel_transaction(struct fw_card *card,
112 * if the transaction is still pending and remove it in that case. 109 * if the transaction is still pending and remove it in that case.
113 */ 110 */
114 111
115 return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0); 112 return close_transaction(transaction, card, RCODE_CANCELLED);
116} 113}
117EXPORT_SYMBOL(fw_cancel_transaction); 114EXPORT_SYMBOL(fw_cancel_transaction);
118 115
119static void 116static void transmit_complete_callback(struct fw_packet *packet,
120transmit_complete_callback(struct fw_packet *packet, 117 struct fw_card *card, int status)
121 struct fw_card *card, int status)
122{ 118{
123 struct fw_transaction *t = 119 struct fw_transaction *t =
124 container_of(packet, struct fw_transaction, packet); 120 container_of(packet, struct fw_transaction, packet);
125 121
126 switch (status) { 122 switch (status) {
127 case ACK_COMPLETE: 123 case ACK_COMPLETE:
128 close_transaction(t, card, RCODE_COMPLETE, NULL, 0); 124 close_transaction(t, card, RCODE_COMPLETE);
129 break; 125 break;
130 case ACK_PENDING: 126 case ACK_PENDING:
131 t->timestamp = packet->timestamp; 127 t->timestamp = packet->timestamp;
@@ -133,31 +129,42 @@ transmit_complete_callback(struct fw_packet *packet,
133 case ACK_BUSY_X: 129 case ACK_BUSY_X:
134 case ACK_BUSY_A: 130 case ACK_BUSY_A:
135 case ACK_BUSY_B: 131 case ACK_BUSY_B:
136 close_transaction(t, card, RCODE_BUSY, NULL, 0); 132 close_transaction(t, card, RCODE_BUSY);
137 break; 133 break;
138 case ACK_DATA_ERROR: 134 case ACK_DATA_ERROR:
139 close_transaction(t, card, RCODE_DATA_ERROR, NULL, 0); 135 close_transaction(t, card, RCODE_DATA_ERROR);
140 break; 136 break;
141 case ACK_TYPE_ERROR: 137 case ACK_TYPE_ERROR:
142 close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0); 138 close_transaction(t, card, RCODE_TYPE_ERROR);
143 break; 139 break;
144 default: 140 default:
145 /* 141 /*
146 * In this case the ack is really a juju specific 142 * In this case the ack is really a juju specific
147 * rcode, so just forward that to the callback. 143 * rcode, so just forward that to the callback.
148 */ 144 */
149 close_transaction(t, card, status, NULL, 0); 145 close_transaction(t, card, status);
150 break; 146 break;
151 } 147 }
152} 148}
153 149
154static void 150static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
155fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
156 int destination_id, int source_id, int generation, int speed, 151 int destination_id, int source_id, int generation, int speed,
157 unsigned long long offset, void *payload, size_t length) 152 unsigned long long offset, void *payload, size_t length)
158{ 153{
159 int ext_tcode; 154 int ext_tcode;
160 155
156 if (tcode == TCODE_STREAM_DATA) {
157 packet->header[0] =
158 HEADER_DATA_LENGTH(length) |
159 destination_id |
160 HEADER_TCODE(TCODE_STREAM_DATA);
161 packet->header_length = 4;
162 packet->payload = payload;
163 packet->payload_length = length;
164
165 goto common;
166 }
167
161 if (tcode > 0x10) { 168 if (tcode > 0x10) {
162 ext_tcode = tcode & ~0x10; 169 ext_tcode = tcode & ~0x10;
163 tcode = TCODE_LOCK_REQUEST; 170 tcode = TCODE_LOCK_REQUEST;
@@ -204,7 +211,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
204 packet->payload_length = 0; 211 packet->payload_length = 0;
205 break; 212 break;
206 } 213 }
207 214 common:
208 packet->speed = speed; 215 packet->speed = speed;
209 packet->generation = generation; 216 packet->generation = generation;
210 packet->ack = 0; 217 packet->ack = 0;
@@ -246,13 +253,14 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
246 * @param callback function to be called when the transaction is completed 253 * @param callback function to be called when the transaction is completed
247 * @param callback_data pointer to arbitrary data, which will be 254 * @param callback_data pointer to arbitrary data, which will be
248 * passed to the callback 255 * passed to the callback
256 *
257 * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller
258 * needs to synthesize @destination_id with fw_stream_packet_destination_id().
249 */ 259 */
250void 260void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
251fw_send_request(struct fw_card *card, struct fw_transaction *t, 261 int destination_id, int generation, int speed,
252 int tcode, int destination_id, int generation, int speed, 262 unsigned long long offset, void *payload, size_t length,
253 unsigned long long offset, 263 fw_transaction_callback_t callback, void *callback_data)
254 void *payload, size_t length,
255 fw_transaction_callback_t callback, void *callback_data)
256{ 264{
257 unsigned long flags; 265 unsigned long flags;
258 int tlabel; 266 int tlabel;
@@ -322,16 +330,16 @@ static void transaction_callback(struct fw_card *card, int rcode,
322 * Returns the RCODE. 330 * Returns the RCODE.
323 */ 331 */
324int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, 332int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
325 int generation, int speed, unsigned long long offset, 333 int generation, int speed, unsigned long long offset,
326 void *data, size_t length) 334 void *payload, size_t length)
327{ 335{
328 struct transaction_callback_data d; 336 struct transaction_callback_data d;
329 struct fw_transaction t; 337 struct fw_transaction t;
330 338
331 init_completion(&d.done); 339 init_completion(&d.done);
332 d.payload = data; 340 d.payload = payload;
333 fw_send_request(card, &t, tcode, destination_id, generation, speed, 341 fw_send_request(card, &t, tcode, destination_id, generation, speed,
334 offset, data, length, transaction_callback, &d); 342 offset, payload, length, transaction_callback, &d);
335 wait_for_completion(&d.done); 343 wait_for_completion(&d.done);
336 344
337 return d.rcode; 345 return d.rcode;
@@ -399,9 +407,8 @@ void fw_flush_transactions(struct fw_card *card)
399 } 407 }
400} 408}
401 409
402static struct fw_address_handler * 410static struct fw_address_handler *lookup_overlapping_address_handler(
403lookup_overlapping_address_handler(struct list_head *list, 411 struct list_head *list, unsigned long long offset, size_t length)
404 unsigned long long offset, size_t length)
405{ 412{
406 struct fw_address_handler *handler; 413 struct fw_address_handler *handler;
407 414
@@ -414,9 +421,8 @@ lookup_overlapping_address_handler(struct list_head *list,
414 return NULL; 421 return NULL;
415} 422}
416 423
417static struct fw_address_handler * 424static struct fw_address_handler *lookup_enclosing_address_handler(
418lookup_enclosing_address_handler(struct list_head *list, 425 struct list_head *list, unsigned long long offset, size_t length)
419 unsigned long long offset, size_t length)
420{ 426{
421 struct fw_address_handler *handler; 427 struct fw_address_handler *handler;
422 428
@@ -449,36 +455,44 @@ const struct fw_address_region fw_unit_space_region =
449#endif /* 0 */ 455#endif /* 0 */
450 456
451/** 457/**
452 * Allocate a range of addresses in the node space of the OHCI 458 * fw_core_add_address_handler - register for incoming requests
453 * controller. When a request is received that falls within the 459 * @handler: callback
454 * specified address range, the specified callback is invoked. The 460 * @region: region in the IEEE 1212 node space address range
455 * parameters passed to the callback give the details of the 461 *
456 * particular request. 462 * region->start, ->end, and handler->length have to be quadlet-aligned.
463 *
464 * When a request is received that falls within the specified address range,
465 * the specified callback is invoked. The parameters passed to the callback
466 * give the details of the particular request.
457 * 467 *
458 * Return value: 0 on success, non-zero otherwise. 468 * Return value: 0 on success, non-zero otherwise.
459 * The start offset of the handler's address region is determined by 469 * The start offset of the handler's address region is determined by
460 * fw_core_add_address_handler() and is returned in handler->offset. 470 * fw_core_add_address_handler() and is returned in handler->offset.
461 * The offset is quadlet-aligned.
462 */ 471 */
463int 472int fw_core_add_address_handler(struct fw_address_handler *handler,
464fw_core_add_address_handler(struct fw_address_handler *handler, 473 const struct fw_address_region *region)
465 const struct fw_address_region *region)
466{ 474{
467 struct fw_address_handler *other; 475 struct fw_address_handler *other;
468 unsigned long flags; 476 unsigned long flags;
469 int ret = -EBUSY; 477 int ret = -EBUSY;
470 478
479 if (region->start & 0xffff000000000003ULL ||
480 region->end & 0xffff000000000003ULL ||
481 region->start >= region->end ||
482 handler->length & 3 ||
483 handler->length == 0)
484 return -EINVAL;
485
471 spin_lock_irqsave(&address_handler_lock, flags); 486 spin_lock_irqsave(&address_handler_lock, flags);
472 487
473 handler->offset = roundup(region->start, 4); 488 handler->offset = region->start;
474 while (handler->offset + handler->length <= region->end) { 489 while (handler->offset + handler->length <= region->end) {
475 other = 490 other =
476 lookup_overlapping_address_handler(&address_handler_list, 491 lookup_overlapping_address_handler(&address_handler_list,
477 handler->offset, 492 handler->offset,
478 handler->length); 493 handler->length);
479 if (other != NULL) { 494 if (other != NULL) {
480 handler->offset = 495 handler->offset += other->length;
481 roundup(other->offset + other->length, 4);
482 } else { 496 } else {
483 list_add_tail(&handler->link, &address_handler_list); 497 list_add_tail(&handler->link, &address_handler_list);
484 ret = 0; 498 ret = 0;
@@ -493,12 +507,7 @@ fw_core_add_address_handler(struct fw_address_handler *handler,
493EXPORT_SYMBOL(fw_core_add_address_handler); 507EXPORT_SYMBOL(fw_core_add_address_handler);
494 508
495/** 509/**
496 * Deallocate a range of addresses allocated with fw_allocate. This 510 * fw_core_remove_address_handler - unregister an address handler
497 * will call the associated callback one last time with a the special
498 * tcode TCODE_DEALLOCATE, to let the client destroy the registered
499 * callback data. For convenience, the callback parameters offset and
500 * length are set to the start and the length respectively for the
501 * deallocated region, payload is set to NULL.
502 */ 511 */
503void fw_core_remove_address_handler(struct fw_address_handler *handler) 512void fw_core_remove_address_handler(struct fw_address_handler *handler)
504{ 513{
@@ -518,9 +527,8 @@ struct fw_request {
518 u32 data[0]; 527 u32 data[0];
519}; 528};
520 529
521static void 530static void free_response_callback(struct fw_packet *packet,
522free_response_callback(struct fw_packet *packet, 531 struct fw_card *card, int status)
523 struct fw_card *card, int status)
524{ 532{
525 struct fw_request *request; 533 struct fw_request *request;
526 534
@@ -528,9 +536,8 @@ free_response_callback(struct fw_packet *packet,
528 kfree(request); 536 kfree(request);
529} 537}
530 538
531void 539void fw_fill_response(struct fw_packet *response, u32 *request_header,
532fw_fill_response(struct fw_packet *response, u32 *request_header, 540 int rcode, void *payload, size_t length)
533 int rcode, void *payload, size_t length)
534{ 541{
535 int tcode, tlabel, extended_tcode, source, destination; 542 int tcode, tlabel, extended_tcode, source, destination;
536 543
@@ -588,8 +595,7 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
588} 595}
589EXPORT_SYMBOL(fw_fill_response); 596EXPORT_SYMBOL(fw_fill_response);
590 597
591static struct fw_request * 598static struct fw_request *allocate_request(struct fw_packet *p)
592allocate_request(struct fw_packet *p)
593{ 599{
594 struct fw_request *request; 600 struct fw_request *request;
595 u32 *data, length; 601 u32 *data, length;
@@ -649,8 +655,8 @@ allocate_request(struct fw_packet *p)
649 return request; 655 return request;
650} 656}
651 657
652void 658void fw_send_response(struct fw_card *card,
653fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) 659 struct fw_request *request, int rcode)
654{ 660{
655 /* unified transaction or broadcast transaction: don't respond */ 661 /* unified transaction or broadcast transaction: don't respond */
656 if (request->ack != ACK_PENDING || 662 if (request->ack != ACK_PENDING ||
@@ -670,8 +676,7 @@ fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
670} 676}
671EXPORT_SYMBOL(fw_send_response); 677EXPORT_SYMBOL(fw_send_response);
672 678
673void 679void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
674fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
675{ 680{
676 struct fw_address_handler *handler; 681 struct fw_address_handler *handler;
677 struct fw_request *request; 682 struct fw_request *request;
@@ -719,8 +724,7 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
719} 724}
720EXPORT_SYMBOL(fw_core_handle_request); 725EXPORT_SYMBOL(fw_core_handle_request);
721 726
722void 727void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
723fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
724{ 728{
725 struct fw_transaction *t; 729 struct fw_transaction *t;
726 unsigned long flags; 730 unsigned long flags;
@@ -793,12 +797,10 @@ static const struct fw_address_region topology_map_region =
793 { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP, 797 { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
794 .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, }; 798 .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
795 799
796static void 800static void handle_topology_map(struct fw_card *card, struct fw_request *request,
797handle_topology_map(struct fw_card *card, struct fw_request *request, 801 int tcode, int destination, int source, int generation,
798 int tcode, int destination, int source, 802 int speed, unsigned long long offset,
799 int generation, int speed, 803 void *payload, size_t length, void *callback_data)
800 unsigned long long offset,
801 void *payload, size_t length, void *callback_data)
802{ 804{
803 int i, start, end; 805 int i, start, end;
804 __be32 *map; 806 __be32 *map;
@@ -832,12 +834,10 @@ static const struct fw_address_region registers_region =
832 { .start = CSR_REGISTER_BASE, 834 { .start = CSR_REGISTER_BASE,
833 .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; 835 .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
834 836
835static void 837static void handle_registers(struct fw_card *card, struct fw_request *request,
836handle_registers(struct fw_card *card, struct fw_request *request, 838 int tcode, int destination, int source, int generation,
837 int tcode, int destination, int source, 839 int speed, unsigned long long offset,
838 int generation, int speed, 840 void *payload, size_t length, void *callback_data)
839 unsigned long long offset,
840 void *payload, size_t length, void *callback_data)
841{ 841{
842 int reg = offset & ~CSR_REGISTER_BASE; 842 int reg = offset & ~CSR_REGISTER_BASE;
843 unsigned long long bus_time; 843 unsigned long long bus_time;
@@ -939,11 +939,11 @@ static struct fw_descriptor model_id_descriptor = {
939 939
940static int __init fw_core_init(void) 940static int __init fw_core_init(void)
941{ 941{
942 int retval; 942 int ret;
943 943
944 retval = bus_register(&fw_bus_type); 944 ret = bus_register(&fw_bus_type);
945 if (retval < 0) 945 if (ret < 0)
946 return retval; 946 return ret;
947 947
948 fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops); 948 fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
949 if (fw_cdev_major < 0) { 949 if (fw_cdev_major < 0) {
@@ -951,19 +951,10 @@ static int __init fw_core_init(void)
951 return fw_cdev_major; 951 return fw_cdev_major;
952 } 952 }
953 953
954 retval = fw_core_add_address_handler(&topology_map, 954 fw_core_add_address_handler(&topology_map, &topology_map_region);
955 &topology_map_region); 955 fw_core_add_address_handler(&registers, &registers_region);
956 BUG_ON(retval < 0); 956 fw_core_add_descriptor(&vendor_id_descriptor);
957 957 fw_core_add_descriptor(&model_id_descriptor);
958 retval = fw_core_add_address_handler(&registers,
959 &registers_region);
960 BUG_ON(retval < 0);
961
962 /* Add the vendor textual descriptor. */
963 retval = fw_core_add_descriptor(&vendor_id_descriptor);
964 BUG_ON(retval < 0);
965 retval = fw_core_add_descriptor(&model_id_descriptor);
966 BUG_ON(retval < 0);
967 958
968 return 0; 959 return 0;
969} 960}
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index 1d78e9cc5940..dfa799068f89 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -82,14 +82,14 @@
82#define CSR_SPEED_MAP 0x2000 82#define CSR_SPEED_MAP 0x2000
83#define CSR_SPEED_MAP_END 0x3000 83#define CSR_SPEED_MAP_END 0x3000
84 84
85#define BANDWIDTH_AVAILABLE_INITIAL 4915
85#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) 86#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
86#define BROADCAST_CHANNEL_VALID (1 << 30) 87#define BROADCAST_CHANNEL_VALID (1 << 30)
87 88
88#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args) 89#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
89#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args) 90#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
90 91
91static inline void 92static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
92fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
93{ 93{
94 u32 *dst = _dst; 94 u32 *dst = _dst;
95 __be32 *src = _src; 95 __be32 *src = _src;
@@ -99,8 +99,7 @@ fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
99 dst[i] = be32_to_cpu(src[i]); 99 dst[i] = be32_to_cpu(src[i]);
100} 100}
101 101
102static inline void 102static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
103fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
104{ 103{
105 fw_memcpy_from_be32(_dst, _src, size); 104 fw_memcpy_from_be32(_dst, _src, size);
106} 105}
@@ -125,8 +124,7 @@ typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
125 struct fw_card *card, int status); 124 struct fw_card *card, int status);
126 125
127typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, 126typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
128 void *data, 127 void *data, size_t length,
129 size_t length,
130 void *callback_data); 128 void *callback_data);
131 129
132/* 130/*
@@ -141,12 +139,6 @@ typedef void (*fw_address_callback_t)(struct fw_card *card,
141 void *data, size_t length, 139 void *data, size_t length,
142 void *callback_data); 140 void *callback_data);
143 141
144typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle,
145 int node_id, int generation,
146 u32 *self_ids,
147 int self_id_count,
148 void *callback_data);
149
150struct fw_packet { 142struct fw_packet {
151 int speed; 143 int speed;
152 int generation; 144 int generation;
@@ -187,12 +179,6 @@ struct fw_transaction {
187 void *callback_data; 179 void *callback_data;
188}; 180};
189 181
190static inline struct fw_packet *
191fw_packet(struct list_head *l)
192{
193 return list_entry(l, struct fw_packet, link);
194}
195
196struct fw_address_handler { 182struct fw_address_handler {
197 u64 offset; 183 u64 offset;
198 size_t length; 184 size_t length;
@@ -201,7 +187,6 @@ struct fw_address_handler {
201 struct list_head link; 187 struct list_head link;
202}; 188};
203 189
204
205struct fw_address_region { 190struct fw_address_region {
206 u64 start; 191 u64 start;
207 u64 end; 192 u64 end;
@@ -255,6 +240,7 @@ struct fw_card {
255 int bm_retries; 240 int bm_retries;
256 int bm_generation; 241 int bm_generation;
257 242
243 bool broadcast_channel_allocated;
258 u32 broadcast_channel; 244 u32 broadcast_channel;
259 u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; 245 u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
260}; 246};
@@ -315,10 +301,8 @@ struct fw_iso_packet {
315struct fw_iso_context; 301struct fw_iso_context;
316 302
317typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, 303typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
318 u32 cycle, 304 u32 cycle, size_t header_length,
319 size_t header_length, 305 void *header, void *data);
320 void *header,
321 void *data);
322 306
323/* 307/*
324 * An iso buffer is just a set of pages mapped for DMA in the 308 * An iso buffer is just a set of pages mapped for DMA in the
@@ -344,36 +328,25 @@ struct fw_iso_context {
344 void *callback_data; 328 void *callback_data;
345}; 329};
346 330
347int 331int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
348fw_iso_buffer_init(struct fw_iso_buffer *buffer, 332 int page_count, enum dma_data_direction direction);
349 struct fw_card *card, 333int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
350 int page_count, 334void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
351 enum dma_data_direction direction); 335
352int 336struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
353fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); 337 int type, int channel, int speed, size_t header_size,
354void 338 fw_iso_callback_t callback, void *callback_data);
355fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); 339int fw_iso_context_queue(struct fw_iso_context *ctx,
356 340 struct fw_iso_packet *packet,
357struct fw_iso_context * 341 struct fw_iso_buffer *buffer,
358fw_iso_context_create(struct fw_card *card, int type, 342 unsigned long payload);
359 int channel, int speed, size_t header_size, 343int fw_iso_context_start(struct fw_iso_context *ctx,
360 fw_iso_callback_t callback, void *callback_data); 344 int cycle, int sync, int tags);
361 345int fw_iso_context_stop(struct fw_iso_context *ctx);
362void 346void fw_iso_context_destroy(struct fw_iso_context *ctx);
363fw_iso_context_destroy(struct fw_iso_context *ctx); 347
364 348void fw_iso_resource_manage(struct fw_card *card, int generation,
365int 349 u64 channels_mask, int *channel, int *bandwidth, bool allocate);
366fw_iso_context_queue(struct fw_iso_context *ctx,
367 struct fw_iso_packet *packet,
368 struct fw_iso_buffer *buffer,
369 unsigned long payload);
370
371int
372fw_iso_context_start(struct fw_iso_context *ctx,
373 int cycle, int sync, int tags);
374
375int
376fw_iso_context_stop(struct fw_iso_context *ctx);
377 350
378struct fw_card_driver { 351struct fw_card_driver {
379 /* 352 /*
@@ -415,7 +388,7 @@ struct fw_card_driver {
415 388
416 struct fw_iso_context * 389 struct fw_iso_context *
417 (*allocate_iso_context)(struct fw_card *card, 390 (*allocate_iso_context)(struct fw_card *card,
418 int type, size_t header_size); 391 int type, int channel, size_t header_size);
419 void (*free_iso_context)(struct fw_iso_context *ctx); 392 void (*free_iso_context)(struct fw_iso_context *ctx);
420 393
421 int (*start_iso)(struct fw_iso_context *ctx, 394 int (*start_iso)(struct fw_iso_context *ctx,
@@ -429,54 +402,45 @@ struct fw_card_driver {
429 int (*stop_iso)(struct fw_iso_context *ctx); 402 int (*stop_iso)(struct fw_iso_context *ctx);
430}; 403};
431 404
432int 405int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
433fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
434 406
435void 407void fw_send_request(struct fw_card *card, struct fw_transaction *t,
436fw_send_request(struct fw_card *card, struct fw_transaction *t,
437 int tcode, int destination_id, int generation, int speed, 408 int tcode, int destination_id, int generation, int speed,
438 unsigned long long offset, void *data, size_t length, 409 unsigned long long offset, void *payload, size_t length,
439 fw_transaction_callback_t callback, void *callback_data); 410 fw_transaction_callback_t callback, void *callback_data);
440
441int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
442 int generation, int speed, unsigned long long offset,
443 void *data, size_t length);
444
445int fw_cancel_transaction(struct fw_card *card, 411int fw_cancel_transaction(struct fw_card *card,
446 struct fw_transaction *transaction); 412 struct fw_transaction *transaction);
447
448void fw_flush_transactions(struct fw_card *card); 413void fw_flush_transactions(struct fw_card *card);
449 414int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
415 int generation, int speed, unsigned long long offset,
416 void *payload, size_t length);
450void fw_send_phy_config(struct fw_card *card, 417void fw_send_phy_config(struct fw_card *card,
451 int node_id, int generation, int gap_count); 418 int node_id, int generation, int gap_count);
452 419
420static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
421{
422 return tag << 14 | channel << 8 | sy;
423}
424
453/* 425/*
454 * Called by the topology code to inform the device code of node 426 * Called by the topology code to inform the device code of node
455 * activity; found, lost, or updated nodes. 427 * activity; found, lost, or updated nodes.
456 */ 428 */
457void 429void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
458fw_node_event(struct fw_card *card, struct fw_node *node, int event);
459 430
460/* API used by card level drivers */ 431/* API used by card level drivers */
461 432
462void 433void fw_card_initialize(struct fw_card *card,
463fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, 434 const struct fw_card_driver *driver, struct device *device);
464 struct device *device); 435int fw_card_add(struct fw_card *card,
465int 436 u32 max_receive, u32 link_speed, u64 guid);
466fw_card_add(struct fw_card *card, 437void fw_core_remove_card(struct fw_card *card);
467 u32 max_receive, u32 link_speed, u64 guid); 438void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
468 439 int generation, int self_id_count, u32 *self_ids);
469void 440void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
470fw_core_remove_card(struct fw_card *card); 441void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
471 442
472void 443extern int fw_irm_set_broadcast_channel_register(struct device *dev,
473fw_core_handle_bus_reset(struct fw_card *card, 444 void *data);
474 int node_id, int generation,
475 int self_id_count, u32 *self_ids);
476void
477fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
478
479void
480fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
481 445
482#endif /* __fw_transaction_h */ 446#endif /* __fw_transaction_h */
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 30022c4a5c12..4ec5061fa584 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -10,7 +10,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
11 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ 11 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
12 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \ 12 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
13 drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o 13 drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \
14 drm_info.o drm_debugfs.o
14 15
15drm-$(CONFIG_COMPAT) += drm_ioc32.o 16drm-$(CONFIG_COMPAT) += drm_ioc32.o
16 17
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
new file mode 100644
index 000000000000..c77c6c6d9d2c
--- /dev/null
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -0,0 +1,235 @@
1/**
2 * \file drm_debugfs.c
3 * debugfs support for DRM
4 *
5 * \author Ben Gamari <bgamari@gmail.com>
6 */
7
8/*
9 * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com
10 *
11 * Copyright 2008 Ben Gamari <bgamari@gmail.com>
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a
14 * copy of this software and associated documentation files (the "Software"),
15 * to deal in the Software without restriction, including without limitation
16 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17 * and/or sell copies of the Software, and to permit persons to whom the
18 * Software is furnished to do so, subject to the following conditions:
19 *
20 * The above copyright notice and this permission notice (including the next
21 * paragraph) shall be included in all copies or substantial portions of the
22 * Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
27 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
28 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
29 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
30 * OTHER DEALINGS IN THE SOFTWARE.
31 */
32
33#include <linux/debugfs.h>
34#include <linux/seq_file.h>
35#include "drmP.h"
36
37#if defined(CONFIG_DEBUG_FS)
38
39/***************************************************
40 * Initialization, etc.
41 **************************************************/
42
43static struct drm_info_list drm_debugfs_list[] = {
44 {"name", drm_name_info, 0},
45 {"vm", drm_vm_info, 0},
46 {"clients", drm_clients_info, 0},
47 {"queues", drm_queues_info, 0},
48 {"bufs", drm_bufs_info, 0},
49 {"gem_names", drm_gem_name_info, DRIVER_GEM},
50 {"gem_objects", drm_gem_object_info, DRIVER_GEM},
51#if DRM_DEBUG_CODE
52 {"vma", drm_vma_info, 0},
53#endif
54};
55#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
56
57
58static int drm_debugfs_open(struct inode *inode, struct file *file)
59{
60 struct drm_info_node *node = inode->i_private;
61
62 return single_open(file, node->info_ent->show, node);
63}
64
65
66static const struct file_operations drm_debugfs_fops = {
67 .owner = THIS_MODULE,
68 .open = drm_debugfs_open,
69 .read = seq_read,
70 .llseek = seq_lseek,
71 .release = single_release,
72};
73
74
75/**
76 * Initialize a given set of debugfs files for a device
77 *
78 * \param files The array of files to create
79 * \param count The number of files given
80 * \param root DRI debugfs dir entry.
81 * \param minor device minor number
82 * \return Zero on success, non-zero on failure
83 *
84 * Create a given set of debugfs files represented by an array of
85 * gdm_debugfs_lists in the given root directory.
86 */
87int drm_debugfs_create_files(struct drm_info_list *files, int count,
88 struct dentry *root, struct drm_minor *minor)
89{
90 struct drm_device *dev = minor->dev;
91 struct dentry *ent;
92 struct drm_info_node *tmp;
93 char name[64];
94 int i, ret;
95
96 for (i = 0; i < count; i++) {
97 u32 features = files[i].driver_features;
98
99 if (features != 0 &&
100 (dev->driver->driver_features & features) != features)
101 continue;
102
103 tmp = drm_alloc(sizeof(struct drm_info_node),
104 _DRM_DRIVER);
105 ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
106 root, tmp, &drm_debugfs_fops);
107 if (!ent) {
108 DRM_ERROR("Cannot create /debugfs/dri/%s/%s\n",
109 name, files[i].name);
110 drm_free(tmp, sizeof(struct drm_info_node),
111 _DRM_DRIVER);
112 ret = -1;
113 goto fail;
114 }
115
116 tmp->minor = minor;
117 tmp->dent = ent;
118 tmp->info_ent = &files[i];
119 list_add(&(tmp->list), &(minor->debugfs_nodes.list));
120 }
121 return 0;
122
123fail:
124 drm_debugfs_remove_files(files, count, minor);
125 return ret;
126}
127EXPORT_SYMBOL(drm_debugfs_create_files);
128
129/**
130 * Initialize the DRI debugfs filesystem for a device
131 *
132 * \param dev DRM device
133 * \param minor device minor number
134 * \param root DRI debugfs dir entry.
135 *
136 * Create the DRI debugfs root entry "/debugfs/dri", the device debugfs root entry
137 * "/debugfs/dri/%minor%/", and each entry in debugfs_list as
138 * "/debugfs/dri/%minor%/%name%".
139 */
140int drm_debugfs_init(struct drm_minor *minor, int minor_id,
141 struct dentry *root)
142{
143 struct drm_device *dev = minor->dev;
144 char name[64];
145 int ret;
146
147 INIT_LIST_HEAD(&minor->debugfs_nodes.list);
148 sprintf(name, "%d", minor_id);
149 minor->debugfs_root = debugfs_create_dir(name, root);
150 if (!minor->debugfs_root) {
151 DRM_ERROR("Cannot create /debugfs/dri/%s\n", name);
152 return -1;
153 }
154
155 ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
156 minor->debugfs_root, minor);
157 if (ret) {
158 debugfs_remove(minor->debugfs_root);
159 minor->debugfs_root = NULL;
160 DRM_ERROR("Failed to create core drm debugfs files\n");
161 return ret;
162 }
163
164 if (dev->driver->debugfs_init) {
165 ret = dev->driver->debugfs_init(minor);
166 if (ret) {
167 DRM_ERROR("DRM: Driver failed to initialize "
168 "/debugfs/dri.\n");
169 return ret;
170 }
171 }
172 return 0;
173}
174
175
176/**
177 * Remove a list of debugfs files
178 *
179 * \param files The list of files
180 * \param count The number of files
181 * \param minor The minor of which we should remove the files
182 * \return always zero.
183 *
184 * Remove all debugfs entries created by debugfs_init().
185 */
186int drm_debugfs_remove_files(struct drm_info_list *files, int count,
187 struct drm_minor *minor)
188{
189 struct list_head *pos, *q;
190 struct drm_info_node *tmp;
191 int i;
192
193 for (i = 0; i < count; i++) {
194 list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
195 tmp = list_entry(pos, struct drm_info_node, list);
196 if (tmp->info_ent == &files[i]) {
197 debugfs_remove(tmp->dent);
198 list_del(pos);
199 drm_free(tmp, sizeof(struct drm_info_node),
200 _DRM_DRIVER);
201 }
202 }
203 }
204 return 0;
205}
206EXPORT_SYMBOL(drm_debugfs_remove_files);
207
208/**
209 * Cleanup the debugfs filesystem resources.
210 *
211 * \param minor device minor number.
212 * \return always zero.
213 *
214 * Remove all debugfs entries created by debugfs_init().
215 */
216int drm_debugfs_cleanup(struct drm_minor *minor)
217{
218 struct drm_device *dev = minor->dev;
219
220 if (!minor->debugfs_root)
221 return 0;
222
223 if (dev->driver->debugfs_cleanup)
224 dev->driver->debugfs_cleanup(minor);
225
226 drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
227
228 debugfs_remove(minor->debugfs_root);
229 minor->debugfs_root = NULL;
230
231 return 0;
232}
233
234#endif /* CONFIG_DEBUG_FS */
235
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 14c7a23dc157..ed32edb17166 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -46,9 +46,11 @@
46 * OTHER DEALINGS IN THE SOFTWARE. 46 * OTHER DEALINGS IN THE SOFTWARE.
47 */ 47 */
48 48
49#include <linux/debugfs.h>
49#include "drmP.h" 50#include "drmP.h"
50#include "drm_core.h" 51#include "drm_core.h"
51 52
53
52static int drm_version(struct drm_device *dev, void *data, 54static int drm_version(struct drm_device *dev, void *data,
53 struct drm_file *file_priv); 55 struct drm_file *file_priv);
54 56
@@ -178,7 +180,7 @@ int drm_lastclose(struct drm_device * dev)
178 180
179 /* Clear AGP information */ 181 /* Clear AGP information */
180 if (drm_core_has_AGP(dev) && dev->agp && 182 if (drm_core_has_AGP(dev) && dev->agp &&
181 !drm_core_check_feature(dev, DRIVER_MODESET)) { 183 !drm_core_check_feature(dev, DRIVER_MODESET)) {
182 struct drm_agp_mem *entry, *tempe; 184 struct drm_agp_mem *entry, *tempe;
183 185
184 /* Remove AGP resources, but leave dev->agp 186 /* Remove AGP resources, but leave dev->agp
@@ -382,6 +384,13 @@ static int __init drm_core_init(void)
382 goto err_p3; 384 goto err_p3;
383 } 385 }
384 386
387 drm_debugfs_root = debugfs_create_dir("dri", NULL);
388 if (!drm_debugfs_root) {
389 DRM_ERROR("Cannot create /debugfs/dri\n");
390 ret = -1;
391 goto err_p3;
392 }
393
385 drm_mem_init(); 394 drm_mem_init();
386 395
387 DRM_INFO("Initialized %s %d.%d.%d %s\n", 396 DRM_INFO("Initialized %s %d.%d.%d %s\n",
@@ -400,6 +409,7 @@ err_p1:
400static void __exit drm_core_exit(void) 409static void __exit drm_core_exit(void)
401{ 410{
402 remove_proc_entry("dri", NULL); 411 remove_proc_entry("dri", NULL);
412 debugfs_remove(drm_debugfs_root);
403 drm_sysfs_destroy(); 413 drm_sysfs_destroy();
404 414
405 unregister_chrdev(DRM_MAJOR, "drm"); 415 unregister_chrdev(DRM_MAJOR, "drm");
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
new file mode 100644
index 000000000000..fc98952b9033
--- /dev/null
+++ b/drivers/gpu/drm/drm_info.c
@@ -0,0 +1,328 @@
1/**
2 * \file drm_info.c
3 * DRM info file implementations
4 *
5 * \author Ben Gamari <bgamari@gmail.com>
6 */
7
8/*
9 * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
10 *
11 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
12 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
13 * Copyright 2008 Ben Gamari <bgamari@gmail.com>
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include <linux/seq_file.h>
37#include "drmP.h"
38
39/**
40 * Called when "/proc/dri/.../name" is read.
41 *
42 * Prints the device name together with the bus id if available.
43 */
44int drm_name_info(struct seq_file *m, void *data)
45{
46 struct drm_info_node *node = (struct drm_info_node *) m->private;
47 struct drm_minor *minor = node->minor;
48 struct drm_device *dev = minor->dev;
49 struct drm_master *master = minor->master;
50
51 if (!master)
52 return 0;
53
54 if (master->unique) {
55 seq_printf(m, "%s %s %s\n",
56 dev->driver->pci_driver.name,
57 pci_name(dev->pdev), master->unique);
58 } else {
59 seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
60 pci_name(dev->pdev));
61 }
62
63 return 0;
64}
65
66/**
67 * Called when "/proc/dri/.../vm" is read.
68 *
69 * Prints information about all mappings in drm_device::maplist.
70 */
71int drm_vm_info(struct seq_file *m, void *data)
72{
73 struct drm_info_node *node = (struct drm_info_node *) m->private;
74 struct drm_device *dev = node->minor->dev;
75 struct drm_map *map;
76 struct drm_map_list *r_list;
77
78 /* Hardcoded from _DRM_FRAME_BUFFER,
79 _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
80 _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
81 const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
82 const char *type;
83 int i;
84
85 mutex_lock(&dev->struct_mutex);
86 seq_printf(m, "slot offset size type flags address mtrr\n\n");
87 i = 0;
88 list_for_each_entry(r_list, &dev->maplist, head) {
89 map = r_list->map;
90 if (!map)
91 continue;
92 if (map->type < 0 || map->type > 5)
93 type = "??";
94 else
95 type = types[map->type];
96
97 seq_printf(m, "%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
98 i,
99 map->offset,
100 map->size, type, map->flags,
101 (unsigned long) r_list->user_token);
102 if (map->mtrr < 0)
103 seq_printf(m, "none\n");
104 else
105 seq_printf(m, "%4d\n", map->mtrr);
106 i++;
107 }
108 mutex_unlock(&dev->struct_mutex);
109 return 0;
110}
111
112/**
113 * Called when "/proc/dri/.../queues" is read.
114 */
115int drm_queues_info(struct seq_file *m, void *data)
116{
117 struct drm_info_node *node = (struct drm_info_node *) m->private;
118 struct drm_device *dev = node->minor->dev;
119 int i;
120 struct drm_queue *q;
121
122 mutex_lock(&dev->struct_mutex);
123 seq_printf(m, " ctx/flags use fin"
124 " blk/rw/rwf wait flushed queued"
125 " locks\n\n");
126 for (i = 0; i < dev->queue_count; i++) {
127 q = dev->queuelist[i];
128 atomic_inc(&q->use_count);
129 seq_printf(m, "%5d/0x%03x %5d %5d"
130 " %5d/%c%c/%c%c%c %5Zd\n",
131 i,
132 q->flags,
133 atomic_read(&q->use_count),
134 atomic_read(&q->finalization),
135 atomic_read(&q->block_count),
136 atomic_read(&q->block_read) ? 'r' : '-',
137 atomic_read(&q->block_write) ? 'w' : '-',
138 waitqueue_active(&q->read_queue) ? 'r' : '-',
139 waitqueue_active(&q->write_queue) ? 'w' : '-',
140 waitqueue_active(&q->flush_queue) ? 'f' : '-',
141 DRM_BUFCOUNT(&q->waitlist));
142 atomic_dec(&q->use_count);
143 }
144 mutex_unlock(&dev->struct_mutex);
145 return 0;
146}
147
148/**
149 * Called when "/proc/dri/.../bufs" is read.
150 */
151int drm_bufs_info(struct seq_file *m, void *data)
152{
153 struct drm_info_node *node = (struct drm_info_node *) m->private;
154 struct drm_device *dev = node->minor->dev;
155 struct drm_device_dma *dma;
156 int i, seg_pages;
157
158 mutex_lock(&dev->struct_mutex);
159 dma = dev->dma;
160 if (!dma) {
161 mutex_unlock(&dev->struct_mutex);
162 return 0;
163 }
164
165 seq_printf(m, " o size count free segs pages kB\n\n");
166 for (i = 0; i <= DRM_MAX_ORDER; i++) {
167 if (dma->bufs[i].buf_count) {
168 seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
169 seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
170 i,
171 dma->bufs[i].buf_size,
172 dma->bufs[i].buf_count,
173 atomic_read(&dma->bufs[i].freelist.count),
174 dma->bufs[i].seg_count,
175 seg_pages,
176 seg_pages * PAGE_SIZE / 1024);
177 }
178 }
179 seq_printf(m, "\n");
180 for (i = 0; i < dma->buf_count; i++) {
181 if (i && !(i % 32))
182 seq_printf(m, "\n");
183 seq_printf(m, " %d", dma->buflist[i]->list);
184 }
185 seq_printf(m, "\n");
186 mutex_unlock(&dev->struct_mutex);
187 return 0;
188}
189
190/**
191 * Called when "/proc/dri/.../vblank" is read.
192 */
193int drm_vblank_info(struct seq_file *m, void *data)
194{
195 struct drm_info_node *node = (struct drm_info_node *) m->private;
196 struct drm_device *dev = node->minor->dev;
197 int crtc;
198
199 mutex_lock(&dev->struct_mutex);
200 for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
201 seq_printf(m, "CRTC %d enable: %d\n",
202 crtc, atomic_read(&dev->vblank_refcount[crtc]));
203 seq_printf(m, "CRTC %d counter: %d\n",
204 crtc, drm_vblank_count(dev, crtc));
205 seq_printf(m, "CRTC %d last wait: %d\n",
206 crtc, dev->last_vblank_wait[crtc]);
207 seq_printf(m, "CRTC %d in modeset: %d\n",
208 crtc, dev->vblank_inmodeset[crtc]);
209 }
210 mutex_unlock(&dev->struct_mutex);
211 return 0;
212}
213
214/**
215 * Called when "/proc/dri/.../clients" is read.
216 *
217 */
218int drm_clients_info(struct seq_file *m, void *data)
219{
220 struct drm_info_node *node = (struct drm_info_node *) m->private;
221 struct drm_device *dev = node->minor->dev;
222 struct drm_file *priv;
223
224 mutex_lock(&dev->struct_mutex);
225 seq_printf(m, "a dev pid uid magic ioctls\n\n");
226 list_for_each_entry(priv, &dev->filelist, lhead) {
227 seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
228 priv->authenticated ? 'y' : 'n',
229 priv->minor->index,
230 priv->pid,
231 priv->uid, priv->magic, priv->ioctl_count);
232 }
233 mutex_unlock(&dev->struct_mutex);
234 return 0;
235}
236
237
238int drm_gem_one_name_info(int id, void *ptr, void *data)
239{
240 struct drm_gem_object *obj = ptr;
241 struct seq_file *m = data;
242
243 seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
244
245 seq_printf(m, "%6d %8zd %7d %8d\n",
246 obj->name, obj->size,
247 atomic_read(&obj->handlecount.refcount),
248 atomic_read(&obj->refcount.refcount));
249 return 0;
250}
251
252int drm_gem_name_info(struct seq_file *m, void *data)
253{
254 struct drm_info_node *node = (struct drm_info_node *) m->private;
255 struct drm_device *dev = node->minor->dev;
256
257 seq_printf(m, " name size handles refcount\n");
258 idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
259 return 0;
260}
261
262int drm_gem_object_info(struct seq_file *m, void* data)
263{
264 struct drm_info_node *node = (struct drm_info_node *) m->private;
265 struct drm_device *dev = node->minor->dev;
266
267 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
268 seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
269 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
270 seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
271 seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
272 seq_printf(m, "%d gtt total\n", dev->gtt_total);
273 return 0;
274}
275
276#if DRM_DEBUG_CODE
277
278int drm_vma_info(struct seq_file *m, void *data)
279{
280 struct drm_info_node *node = (struct drm_info_node *) m->private;
281 struct drm_device *dev = node->minor->dev;
282 struct drm_vma_entry *pt;
283 struct vm_area_struct *vma;
284#if defined(__i386__)
285 unsigned int pgprot;
286#endif
287
288 mutex_lock(&dev->struct_mutex);
289 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08lx\n",
290 atomic_read(&dev->vma_count),
291 high_memory, virt_to_phys(high_memory));
292
293 list_for_each_entry(pt, &dev->vmalist, head) {
294 vma = pt->vma;
295 if (!vma)
296 continue;
297 seq_printf(m,
298 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
299 pt->pid, vma->vm_start, vma->vm_end,
300 vma->vm_flags & VM_READ ? 'r' : '-',
301 vma->vm_flags & VM_WRITE ? 'w' : '-',
302 vma->vm_flags & VM_EXEC ? 'x' : '-',
303 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
304 vma->vm_flags & VM_LOCKED ? 'l' : '-',
305 vma->vm_flags & VM_IO ? 'i' : '-',
306 vma->vm_pgoff);
307
308#if defined(__i386__)
309 pgprot = pgprot_val(vma->vm_page_prot);
310 seq_printf(m, " %c%c%c%c%c%c%c%c%c",
311 pgprot & _PAGE_PRESENT ? 'p' : '-',
312 pgprot & _PAGE_RW ? 'w' : 'r',
313 pgprot & _PAGE_USER ? 'u' : 's',
314 pgprot & _PAGE_PWT ? 't' : 'b',
315 pgprot & _PAGE_PCD ? 'u' : 'c',
316 pgprot & _PAGE_ACCESSED ? 'a' : '-',
317 pgprot & _PAGE_DIRTY ? 'd' : '-',
318 pgprot & _PAGE_PSE ? 'm' : 'k',
319 pgprot & _PAGE_GLOBAL ? 'g' : 'l');
320#endif
321 seq_printf(m, "\n");
322 }
323 mutex_unlock(&dev->struct_mutex);
324 return 0;
325}
326
327#endif
328
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index 8df849f66830..9b3c5af61e98 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -37,697 +37,196 @@
37 * OTHER DEALINGS IN THE SOFTWARE. 37 * OTHER DEALINGS IN THE SOFTWARE.
38 */ 38 */
39 39
40#include <linux/seq_file.h>
40#include "drmP.h" 41#include "drmP.h"
41 42
42static int drm_name_info(char *buf, char **start, off_t offset, 43
43 int request, int *eof, void *data); 44/***************************************************
44static int drm_vm_info(char *buf, char **start, off_t offset, 45 * Initialization, etc.
45 int request, int *eof, void *data); 46 **************************************************/
46static int drm_clients_info(char *buf, char **start, off_t offset,
47 int request, int *eof, void *data);
48static int drm_queues_info(char *buf, char **start, off_t offset,
49 int request, int *eof, void *data);
50static int drm_bufs_info(char *buf, char **start, off_t offset,
51 int request, int *eof, void *data);
52static int drm_vblank_info(char *buf, char **start, off_t offset,
53 int request, int *eof, void *data);
54static int drm_gem_name_info(char *buf, char **start, off_t offset,
55 int request, int *eof, void *data);
56static int drm_gem_object_info(char *buf, char **start, off_t offset,
57 int request, int *eof, void *data);
58#if DRM_DEBUG_CODE
59static int drm_vma_info(char *buf, char **start, off_t offset,
60 int request, int *eof, void *data);
61#endif
62 47
63/** 48/**
64 * Proc file list. 49 * Proc file list.
65 */ 50 */
66static struct drm_proc_list { 51static struct drm_info_list drm_proc_list[] = {
67 const char *name; /**< file name */
68 int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
69 u32 driver_features; /**< Required driver features for this entry */
70} drm_proc_list[] = {
71 {"name", drm_name_info, 0}, 52 {"name", drm_name_info, 0},
72 {"mem", drm_mem_info, 0},
73 {"vm", drm_vm_info, 0}, 53 {"vm", drm_vm_info, 0},
74 {"clients", drm_clients_info, 0}, 54 {"clients", drm_clients_info, 0},
75 {"queues", drm_queues_info, 0}, 55 {"queues", drm_queues_info, 0},
76 {"bufs", drm_bufs_info, 0}, 56 {"bufs", drm_bufs_info, 0},
77 {"vblank", drm_vblank_info, 0},
78 {"gem_names", drm_gem_name_info, DRIVER_GEM}, 57 {"gem_names", drm_gem_name_info, DRIVER_GEM},
79 {"gem_objects", drm_gem_object_info, DRIVER_GEM}, 58 {"gem_objects", drm_gem_object_info, DRIVER_GEM},
80#if DRM_DEBUG_CODE 59#if DRM_DEBUG_CODE
81 {"vma", drm_vma_info}, 60 {"vma", drm_vma_info, 0},
82#endif 61#endif
83}; 62};
84
85#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list) 63#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
86 64
65static int drm_proc_open(struct inode *inode, struct file *file)
66{
67 struct drm_info_node* node = PDE(inode)->data;
68
69 return single_open(file, node->info_ent->show, node);
70}
71
72static const struct file_operations drm_proc_fops = {
73 .owner = THIS_MODULE,
74 .open = drm_proc_open,
75 .read = seq_read,
76 .llseek = seq_lseek,
77 .release = single_release,
78};
79
80
87/** 81/**
88 * Initialize the DRI proc filesystem for a device. 82 * Initialize a given set of proc files for a device
89 * 83 *
90 * \param dev DRM device. 84 * \param files The array of files to create
91 * \param minor device minor number. 85 * \param count The number of files given
92 * \param root DRI proc dir entry. 86 * \param root DRI proc dir entry.
93 * \param dev_root resulting DRI device proc dir entry. 87 * \param minor device minor number
94 * \return root entry pointer on success, or NULL on failure. 88 * \return Zero on success, non-zero on failure
95 * 89 *
96 * Create the DRI proc root entry "/proc/dri", the device proc root entry 90 * Create a given set of proc files represented by an array of
97 * "/proc/dri/%minor%/", and each entry in proc_list as 91 * gdm_proc_lists in the given root directory.
98 * "/proc/dri/%minor%/%name%".
99 */ 92 */
100int drm_proc_init(struct drm_minor *minor, int minor_id, 93int drm_proc_create_files(struct drm_info_list *files, int count,
101 struct proc_dir_entry *root) 94 struct proc_dir_entry *root, struct drm_minor *minor)
102{ 95{
103 struct drm_device *dev = minor->dev; 96 struct drm_device *dev = minor->dev;
104 struct proc_dir_entry *ent; 97 struct proc_dir_entry *ent;
105 int i, j, ret; 98 struct drm_info_node *tmp;
106 char name[64]; 99 char name[64];
100 int i, ret;
107 101
108 sprintf(name, "%d", minor_id); 102 for (i = 0; i < count; i++) {
109 minor->dev_root = proc_mkdir(name, root); 103 u32 features = files[i].driver_features;
110 if (!minor->dev_root) {
111 DRM_ERROR("Cannot create /proc/dri/%s\n", name);
112 return -1;
113 }
114
115 for (i = 0; i < DRM_PROC_ENTRIES; i++) {
116 u32 features = drm_proc_list[i].driver_features;
117 104
118 if (features != 0 && 105 if (features != 0 &&
119 (dev->driver->driver_features & features) != features) 106 (dev->driver->driver_features & features) != features)
120 continue; 107 continue;
121 108
122 ent = create_proc_entry(drm_proc_list[i].name, 109 tmp = drm_alloc(sizeof(struct drm_info_node), _DRM_DRIVER);
123 S_IFREG | S_IRUGO, minor->dev_root); 110 ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root);
124 if (!ent) { 111 if (!ent) {
125 DRM_ERROR("Cannot create /proc/dri/%s/%s\n", 112 DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
126 name, drm_proc_list[i].name); 113 name, files[i].name);
114 drm_free(tmp, sizeof(struct drm_info_node),
115 _DRM_DRIVER);
127 ret = -1; 116 ret = -1;
128 goto fail; 117 goto fail;
129 } 118 }
130 ent->read_proc = drm_proc_list[i].f;
131 ent->data = minor;
132 }
133 119
134 if (dev->driver->proc_init) { 120 ent->proc_fops = &drm_proc_fops;
135 ret = dev->driver->proc_init(minor); 121 ent->data = tmp;
136 if (ret) { 122 tmp->minor = minor;
137 DRM_ERROR("DRM: Driver failed to initialize " 123 tmp->info_ent = &files[i];
138 "/proc/dri.\n"); 124 list_add(&(tmp->list), &(minor->proc_nodes.list));
139 goto fail;
140 }
141 } 125 }
142
143 return 0; 126 return 0;
144 fail:
145 127
146 for (j = 0; j < i; j++) 128fail:
147 remove_proc_entry(drm_proc_list[i].name, 129 for (i = 0; i < count; i++)
148 minor->dev_root); 130 remove_proc_entry(drm_proc_list[i].name, minor->proc_root);
149 remove_proc_entry(name, root);
150 minor->dev_root = NULL;
151 return ret; 131 return ret;
152} 132}
153 133
154/** 134/**
155 * Cleanup the proc filesystem resources. 135 * Initialize the DRI proc filesystem for a device
156 * 136 *
157 * \param minor device minor number. 137 * \param dev DRM device
138 * \param minor device minor number
158 * \param root DRI proc dir entry. 139 * \param root DRI proc dir entry.
159 * \param dev_root DRI device proc dir entry. 140 * \param dev_root resulting DRI device proc dir entry.
160 * \return always zero. 141 * \return root entry pointer on success, or NULL on failure.
161 * 142 *
162 * Remove all proc entries created by proc_init(). 143 * Create the DRI proc root entry "/proc/dri", the device proc root entry
144 * "/proc/dri/%minor%/", and each entry in proc_list as
145 * "/proc/dri/%minor%/%name%".
163 */ 146 */
164int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) 147int drm_proc_init(struct drm_minor *minor, int minor_id,
148 struct proc_dir_entry *root)
165{ 149{
166 struct drm_device *dev = minor->dev; 150 struct drm_device *dev = minor->dev;
167 int i;
168 char name[64]; 151 char name[64];
152 int ret;
169 153
170 if (!root || !minor->dev_root) 154 INIT_LIST_HEAD(&minor->proc_nodes.list);
171 return 0; 155 sprintf(name, "%d", minor_id);
172 156 minor->proc_root = proc_mkdir(name, root);
173 if (dev->driver->proc_cleanup) 157 if (!minor->proc_root) {
174 dev->driver->proc_cleanup(minor); 158 DRM_ERROR("Cannot create /proc/dri/%s\n", name);
175 159 return -1;
176 for (i = 0; i < DRM_PROC_ENTRIES; i++)
177 remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
178 sprintf(name, "%d", minor->index);
179 remove_proc_entry(name, root);
180
181 return 0;
182}
183
184/**
185 * Called when "/proc/dri/.../name" is read.
186 *
187 * \param buf output buffer.
188 * \param start start of output data.
189 * \param offset requested start offset.
190 * \param request requested number of bytes.
191 * \param eof whether there is no more data to return.
192 * \param data private data.
193 * \return number of written bytes.
194 *
195 * Prints the device name together with the bus id if available.
196 */
197static int drm_name_info(char *buf, char **start, off_t offset, int request,
198 int *eof, void *data)
199{
200 struct drm_minor *minor = (struct drm_minor *) data;
201 struct drm_master *master = minor->master;
202 struct drm_device *dev = minor->dev;
203 int len = 0;
204
205 if (offset > DRM_PROC_LIMIT) {
206 *eof = 1;
207 return 0;
208 } 160 }
209 161
210 if (!master) 162 ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
211 return 0; 163 minor->proc_root, minor);
212 164 if (ret) {
213 *start = &buf[offset]; 165 remove_proc_entry(name, root);
214 *eof = 0; 166 minor->proc_root = NULL;
215 167 DRM_ERROR("Failed to create core drm proc files\n");
216 if (master->unique) { 168 return ret;
217 DRM_PROC_PRINT("%s %s %s\n",
218 dev->driver->pci_driver.name,
219 pci_name(dev->pdev), master->unique);
220 } else {
221 DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
222 pci_name(dev->pdev));
223 } 169 }
224 170
225 if (len > request + offset) 171 if (dev->driver->proc_init) {
226 return request; 172 ret = dev->driver->proc_init(minor);
227 *eof = 1; 173 if (ret) {
228 return len - offset; 174 DRM_ERROR("DRM: Driver failed to initialize "
229} 175 "/proc/dri.\n");
230 176 return ret;
231/**
232 * Called when "/proc/dri/.../vm" is read.
233 *
234 * \param buf output buffer.
235 * \param start start of output data.
236 * \param offset requested start offset.
237 * \param request requested number of bytes.
238 * \param eof whether there is no more data to return.
239 * \param data private data.
240 * \return number of written bytes.
241 *
242 * Prints information about all mappings in drm_device::maplist.
243 */
244static int drm__vm_info(char *buf, char **start, off_t offset, int request,
245 int *eof, void *data)
246{
247 struct drm_minor *minor = (struct drm_minor *) data;
248 struct drm_device *dev = minor->dev;
249 int len = 0;
250 struct drm_map *map;
251 struct drm_map_list *r_list;
252
253 /* Hardcoded from _DRM_FRAME_BUFFER,
254 _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
255 _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
256 const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
257 const char *type;
258 int i;
259
260 if (offset > DRM_PROC_LIMIT) {
261 *eof = 1;
262 return 0;
263 }
264
265 *start = &buf[offset];
266 *eof = 0;
267
268 DRM_PROC_PRINT("slot offset size type flags "
269 "address mtrr\n\n");
270 i = 0;
271 list_for_each_entry(r_list, &dev->maplist, head) {
272 map = r_list->map;
273 if (!map)
274 continue;
275 if (map->type < 0 || map->type > 5)
276 type = "??";
277 else
278 type = types[map->type];
279 DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
280 i,
281 map->offset,
282 map->size, type, map->flags,
283 (unsigned long) r_list->user_token);
284 if (map->mtrr < 0) {
285 DRM_PROC_PRINT("none\n");
286 } else {
287 DRM_PROC_PRINT("%4d\n", map->mtrr);
288 } 177 }
289 i++;
290 }
291
292 if (len > request + offset)
293 return request;
294 *eof = 1;
295 return len - offset;
296}
297
298/**
299 * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
300 */
301static int drm_vm_info(char *buf, char **start, off_t offset, int request,
302 int *eof, void *data)
303{
304 struct drm_minor *minor = (struct drm_minor *) data;
305 struct drm_device *dev = minor->dev;
306 int ret;
307
308 mutex_lock(&dev->struct_mutex);
309 ret = drm__vm_info(buf, start, offset, request, eof, data);
310 mutex_unlock(&dev->struct_mutex);
311 return ret;
312}
313
314/**
315 * Called when "/proc/dri/.../queues" is read.
316 *
317 * \param buf output buffer.
318 * \param start start of output data.
319 * \param offset requested start offset.
320 * \param request requested number of bytes.
321 * \param eof whether there is no more data to return.
322 * \param data private data.
323 * \return number of written bytes.
324 */
325static int drm__queues_info(char *buf, char **start, off_t offset,
326 int request, int *eof, void *data)
327{
328 struct drm_minor *minor = (struct drm_minor *) data;
329 struct drm_device *dev = minor->dev;
330 int len = 0;
331 int i;
332 struct drm_queue *q;
333
334 if (offset > DRM_PROC_LIMIT) {
335 *eof = 1;
336 return 0;
337 } 178 }
338 179 return 0;
339 *start = &buf[offset];
340 *eof = 0;
341
342 DRM_PROC_PRINT(" ctx/flags use fin"
343 " blk/rw/rwf wait flushed queued"
344 " locks\n\n");
345 for (i = 0; i < dev->queue_count; i++) {
346 q = dev->queuelist[i];
347 atomic_inc(&q->use_count);
348 DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
349 "%5d/0x%03x %5d %5d"
350 " %5d/%c%c/%c%c%c %5Zd\n",
351 i,
352 q->flags,
353 atomic_read(&q->use_count),
354 atomic_read(&q->finalization),
355 atomic_read(&q->block_count),
356 atomic_read(&q->block_read) ? 'r' : '-',
357 atomic_read(&q->block_write) ? 'w' : '-',
358 waitqueue_active(&q->read_queue) ? 'r' : '-',
359 waitqueue_active(&q->
360 write_queue) ? 'w' : '-',
361 waitqueue_active(&q->
362 flush_queue) ? 'f' : '-',
363 DRM_BUFCOUNT(&q->waitlist));
364 atomic_dec(&q->use_count);
365 }
366
367 if (len > request + offset)
368 return request;
369 *eof = 1;
370 return len - offset;
371}
372
373/**
374 * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
375 */
376static int drm_queues_info(char *buf, char **start, off_t offset, int request,
377 int *eof, void *data)
378{
379 struct drm_minor *minor = (struct drm_minor *) data;
380 struct drm_device *dev = minor->dev;
381 int ret;
382
383 mutex_lock(&dev->struct_mutex);
384 ret = drm__queues_info(buf, start, offset, request, eof, data);
385 mutex_unlock(&dev->struct_mutex);
386 return ret;
387} 180}
388 181
389/** 182int drm_proc_remove_files(struct drm_info_list *files, int count,
390 * Called when "/proc/dri/.../bufs" is read. 183 struct drm_minor *minor)
391 *
392 * \param buf output buffer.
393 * \param start start of output data.
394 * \param offset requested start offset.
395 * \param request requested number of bytes.
396 * \param eof whether there is no more data to return.
397 * \param data private data.
398 * \return number of written bytes.
399 */
400static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
401 int *eof, void *data)
402{ 184{
403 struct drm_minor *minor = (struct drm_minor *) data; 185 struct list_head *pos, *q;
404 struct drm_device *dev = minor->dev; 186 struct drm_info_node *tmp;
405 int len = 0;
406 struct drm_device_dma *dma = dev->dma;
407 int i; 187 int i;
408 188
409 if (!dma || offset > DRM_PROC_LIMIT) { 189 for (i = 0; i < count; i++) {
410 *eof = 1; 190 list_for_each_safe(pos, q, &minor->proc_nodes.list) {
411 return 0; 191 tmp = list_entry(pos, struct drm_info_node, list);
412 } 192 if (tmp->info_ent == &files[i]) {
413 193 remove_proc_entry(files[i].name,
414 *start = &buf[offset]; 194 minor->proc_root);
415 *eof = 0; 195 list_del(pos);
416 196 drm_free(tmp, sizeof(struct drm_info_node),
417 DRM_PROC_PRINT(" o size count free segs pages kB\n\n"); 197 _DRM_DRIVER);
418 for (i = 0; i <= DRM_MAX_ORDER; i++) { 198 }
419 if (dma->bufs[i].buf_count) 199 }
420 DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
421 i,
422 dma->bufs[i].buf_size,
423 dma->bufs[i].buf_count,
424 atomic_read(&dma->bufs[i]
425 .freelist.count),
426 dma->bufs[i].seg_count,
427 dma->bufs[i].seg_count
428 * (1 << dma->bufs[i].page_order),
429 (dma->bufs[i].seg_count
430 * (1 << dma->bufs[i].page_order))
431 * PAGE_SIZE / 1024);
432 }
433 DRM_PROC_PRINT("\n");
434 for (i = 0; i < dma->buf_count; i++) {
435 if (i && !(i % 32))
436 DRM_PROC_PRINT("\n");
437 DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
438 } 200 }
439 DRM_PROC_PRINT("\n"); 201 return 0;
440
441 if (len > request + offset)
442 return request;
443 *eof = 1;
444 return len - offset;
445}
446
447/**
448 * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
449 */
450static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
451 int *eof, void *data)
452{
453 struct drm_minor *minor = (struct drm_minor *) data;
454 struct drm_device *dev = minor->dev;
455 int ret;
456
457 mutex_lock(&dev->struct_mutex);
458 ret = drm__bufs_info(buf, start, offset, request, eof, data);
459 mutex_unlock(&dev->struct_mutex);
460 return ret;
461} 202}
462 203
463/** 204/**
464 * Called when "/proc/dri/.../vblank" is read. 205 * Cleanup the proc filesystem resources.
465 * 206 *
466 * \param buf output buffer. 207 * \param minor device minor number.
467 * \param start start of output data. 208 * \param root DRI proc dir entry.
468 * \param offset requested start offset. 209 * \param dev_root DRI device proc dir entry.
469 * \param request requested number of bytes. 210 * \return always zero.
470 * \param eof whether there is no more data to return.
471 * \param data private data.
472 * \return number of written bytes.
473 */
474static int drm__vblank_info(char *buf, char **start, off_t offset, int request,
475 int *eof, void *data)
476{
477 struct drm_minor *minor = (struct drm_minor *) data;
478 struct drm_device *dev = minor->dev;
479 int len = 0;
480 int crtc;
481
482 if (offset > DRM_PROC_LIMIT) {
483 *eof = 1;
484 return 0;
485 }
486
487 *start = &buf[offset];
488 *eof = 0;
489
490 for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
491 DRM_PROC_PRINT("CRTC %d enable: %d\n",
492 crtc, atomic_read(&dev->vblank_refcount[crtc]));
493 DRM_PROC_PRINT("CRTC %d counter: %d\n",
494 crtc, drm_vblank_count(dev, crtc));
495 DRM_PROC_PRINT("CRTC %d last wait: %d\n",
496 crtc, dev->last_vblank_wait[crtc]);
497 DRM_PROC_PRINT("CRTC %d in modeset: %d\n",
498 crtc, dev->vblank_inmodeset[crtc]);
499 }
500
501 if (len > request + offset)
502 return request;
503 *eof = 1;
504 return len - offset;
505}
506
507/**
508 * Simply calls _vblank_info() while holding the drm_device::struct_mutex lock.
509 */
510static int drm_vblank_info(char *buf, char **start, off_t offset, int request,
511 int *eof, void *data)
512{
513 struct drm_minor *minor = (struct drm_minor *) data;
514 struct drm_device *dev = minor->dev;
515 int ret;
516
517 mutex_lock(&dev->struct_mutex);
518 ret = drm__vblank_info(buf, start, offset, request, eof, data);
519 mutex_unlock(&dev->struct_mutex);
520 return ret;
521}
522
523/**
524 * Called when "/proc/dri/.../clients" is read.
525 * 211 *
526 * \param buf output buffer. 212 * Remove all proc entries created by proc_init().
527 * \param start start of output data.
528 * \param offset requested start offset.
529 * \param request requested number of bytes.
530 * \param eof whether there is no more data to return.
531 * \param data private data.
532 * \return number of written bytes.
533 */ 213 */
534static int drm__clients_info(char *buf, char **start, off_t offset, 214int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
535 int request, int *eof, void *data)
536{ 215{
537 struct drm_minor *minor = (struct drm_minor *) data;
538 struct drm_device *dev = minor->dev; 216 struct drm_device *dev = minor->dev;
539 int len = 0; 217 char name[64];
540 struct drm_file *priv;
541 218
542 if (offset > DRM_PROC_LIMIT) { 219 if (!root || !minor->proc_root)
543 *eof = 1;
544 return 0; 220 return 0;
545 }
546
547 *start = &buf[offset];
548 *eof = 0;
549
550 DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
551 list_for_each_entry(priv, &dev->filelist, lhead) {
552 DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
553 priv->authenticated ? 'y' : 'n',
554 priv->minor->index,
555 priv->pid,
556 priv->uid, priv->magic, priv->ioctl_count);
557 }
558 221
559 if (len > request + offset) 222 if (dev->driver->proc_cleanup)
560 return request; 223 dev->driver->proc_cleanup(minor);
561 *eof = 1;
562 return len - offset;
563}
564
565/**
566 * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
567 */
568static int drm_clients_info(char *buf, char **start, off_t offset,
569 int request, int *eof, void *data)
570{
571 struct drm_minor *minor = (struct drm_minor *) data;
572 struct drm_device *dev = minor->dev;
573 int ret;
574
575 mutex_lock(&dev->struct_mutex);
576 ret = drm__clients_info(buf, start, offset, request, eof, data);
577 mutex_unlock(&dev->struct_mutex);
578 return ret;
579}
580
581struct drm_gem_name_info_data {
582 int len;
583 char *buf;
584 int eof;
585};
586 224
587static int drm_gem_one_name_info(int id, void *ptr, void *data) 225 drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
588{
589 struct drm_gem_object *obj = ptr;
590 struct drm_gem_name_info_data *nid = data;
591 226
592 DRM_INFO("name %d size %zd\n", obj->name, obj->size); 227 sprintf(name, "%d", minor->index);
593 if (nid->eof) 228 remove_proc_entry(name, root);
594 return 0;
595 229
596 nid->len += sprintf(&nid->buf[nid->len],
597 "%6d %8zd %7d %8d\n",
598 obj->name, obj->size,
599 atomic_read(&obj->handlecount.refcount),
600 atomic_read(&obj->refcount.refcount));
601 if (nid->len > DRM_PROC_LIMIT) {
602 nid->eof = 1;
603 return 0;
604 }
605 return 0; 230 return 0;
606} 231}
607 232
608static int drm_gem_name_info(char *buf, char **start, off_t offset,
609 int request, int *eof, void *data)
610{
611 struct drm_minor *minor = (struct drm_minor *) data;
612 struct drm_device *dev = minor->dev;
613 struct drm_gem_name_info_data nid;
614
615 if (offset > DRM_PROC_LIMIT) {
616 *eof = 1;
617 return 0;
618 }
619
620 nid.len = sprintf(buf, " name size handles refcount\n");
621 nid.buf = buf;
622 nid.eof = 0;
623 idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
624
625 *start = &buf[offset];
626 *eof = 0;
627 if (nid.len > request + offset)
628 return request;
629 *eof = 1;
630 return nid.len - offset;
631}
632
633static int drm_gem_object_info(char *buf, char **start, off_t offset,
634 int request, int *eof, void *data)
635{
636 struct drm_minor *minor = (struct drm_minor *) data;
637 struct drm_device *dev = minor->dev;
638 int len = 0;
639
640 if (offset > DRM_PROC_LIMIT) {
641 *eof = 1;
642 return 0;
643 }
644
645 *start = &buf[offset];
646 *eof = 0;
647 DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
648 DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
649 DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
650 DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
651 DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
652 DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
653 if (len > request + offset)
654 return request;
655 *eof = 1;
656 return len - offset;
657}
658
659#if DRM_DEBUG_CODE
660
661static int drm__vma_info(char *buf, char **start, off_t offset, int request,
662 int *eof, void *data)
663{
664 struct drm_minor *minor = (struct drm_minor *) data;
665 struct drm_device *dev = minor->dev;
666 int len = 0;
667 struct drm_vma_entry *pt;
668 struct vm_area_struct *vma;
669#if defined(__i386__)
670 unsigned int pgprot;
671#endif
672
673 if (offset > DRM_PROC_LIMIT) {
674 *eof = 1;
675 return 0;
676 }
677
678 *start = &buf[offset];
679 *eof = 0;
680
681 DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
682 atomic_read(&dev->vma_count),
683 high_memory, virt_to_phys(high_memory));
684 list_for_each_entry(pt, &dev->vmalist, head) {
685 if (!(vma = pt->vma))
686 continue;
687 DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
688 pt->pid,
689 vma->vm_start,
690 vma->vm_end,
691 vma->vm_flags & VM_READ ? 'r' : '-',
692 vma->vm_flags & VM_WRITE ? 'w' : '-',
693 vma->vm_flags & VM_EXEC ? 'x' : '-',
694 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
695 vma->vm_flags & VM_LOCKED ? 'l' : '-',
696 vma->vm_flags & VM_IO ? 'i' : '-',
697 vma->vm_pgoff);
698
699#if defined(__i386__)
700 pgprot = pgprot_val(vma->vm_page_prot);
701 DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
702 pgprot & _PAGE_PRESENT ? 'p' : '-',
703 pgprot & _PAGE_RW ? 'w' : 'r',
704 pgprot & _PAGE_USER ? 'u' : 's',
705 pgprot & _PAGE_PWT ? 't' : 'b',
706 pgprot & _PAGE_PCD ? 'u' : 'c',
707 pgprot & _PAGE_ACCESSED ? 'a' : '-',
708 pgprot & _PAGE_DIRTY ? 'd' : '-',
709 pgprot & _PAGE_PSE ? 'm' : 'k',
710 pgprot & _PAGE_GLOBAL ? 'g' : 'l');
711#endif
712 DRM_PROC_PRINT("\n");
713 }
714
715 if (len > request + offset)
716 return request;
717 *eof = 1;
718 return len - offset;
719}
720
721static int drm_vma_info(char *buf, char **start, off_t offset, int request,
722 int *eof, void *data)
723{
724 struct drm_minor *minor = (struct drm_minor *) data;
725 struct drm_device *dev = minor->dev;
726 int ret;
727
728 mutex_lock(&dev->struct_mutex);
729 ret = drm__vma_info(buf, start, offset, request, eof, data);
730 mutex_unlock(&dev->struct_mutex);
731 return ret;
732}
733#endif
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 7c8b15b22bf2..48f33be8fd0f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -50,6 +50,7 @@ struct idr drm_minors_idr;
50 50
51struct class *drm_class; 51struct class *drm_class;
52struct proc_dir_entry *drm_proc_root; 52struct proc_dir_entry *drm_proc_root;
53struct dentry *drm_debugfs_root;
53 54
54static int drm_minor_get_id(struct drm_device *dev, int type) 55static int drm_minor_get_id(struct drm_device *dev, int type)
55{ 56{
@@ -313,7 +314,15 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
313 goto err_mem; 314 goto err_mem;
314 } 315 }
315 } else 316 } else
316 new_minor->dev_root = NULL; 317 new_minor->proc_root = NULL;
318
319#if defined(CONFIG_DEBUG_FS)
320 ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
321 if (ret) {
322 DRM_ERROR("DRM: Failed to initialize /debugfs/dri.\n");
323 goto err_g2;
324 }
325#endif
317 326
318 ret = drm_sysfs_device_add(new_minor); 327 ret = drm_sysfs_device_add(new_minor);
319 if (ret) { 328 if (ret) {
@@ -451,6 +460,10 @@ int drm_put_minor(struct drm_minor **minor_p)
451 460
452 if (minor->type == DRM_MINOR_LEGACY) 461 if (minor->type == DRM_MINOR_LEGACY)
453 drm_proc_cleanup(minor, drm_proc_root); 462 drm_proc_cleanup(minor, drm_proc_root);
463#if defined(CONFIG_DEBUG_FS)
464 drm_debugfs_cleanup(minor);
465#endif
466
454 drm_sysfs_device_remove(minor); 467 drm_sysfs_device_remove(minor);
455 468
456 idr_remove(&drm_minors_idr, minor->index); 469 idr_remove(&drm_minors_idr, minor->index);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 793cba39d832..51c5a050aa73 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -7,7 +7,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
7 i915_suspend.o \ 7 i915_suspend.o \
8 i915_gem.o \ 8 i915_gem.o \
9 i915_gem_debug.o \ 9 i915_gem_debug.o \
10 i915_gem_proc.o \ 10 i915_gem_debugfs.o \
11 i915_gem_tiling.o \ 11 i915_gem_tiling.o \
12 intel_display.o \ 12 intel_display.o \
13 intel_crt.o \ 13 intel_crt.o \
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 6d21b9e48b89..a818b377e1f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -41,7 +41,6 @@
41int i915_wait_ring(struct drm_device * dev, int n, const char *caller) 41int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
42{ 42{
43 drm_i915_private_t *dev_priv = dev->dev_private; 43 drm_i915_private_t *dev_priv = dev->dev_private;
44 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
45 drm_i915_ring_buffer_t *ring = &(dev_priv->ring); 44 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
46 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; 45 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
47 u32 last_acthd = I915_READ(acthd_reg); 46 u32 last_acthd = I915_READ(acthd_reg);
@@ -58,8 +57,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
58 if (ring->space >= n) 57 if (ring->space >= n)
59 return 0; 58 return 0;
60 59
61 if (master_priv->sarea_priv) 60 if (dev->primary->master) {
62 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 61 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
62 if (master_priv->sarea_priv)
63 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
64 }
65
63 66
64 if (ring->head != last_head) 67 if (ring->head != last_head)
65 i = 0; 68 i = 0;
@@ -356,7 +359,7 @@ static int validate_cmd(int cmd)
356 return ret; 359 return ret;
357} 360}
358 361
359static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords) 362static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
360{ 363{
361 drm_i915_private_t *dev_priv = dev->dev_private; 364 drm_i915_private_t *dev_priv = dev->dev_private;
362 int i; 365 int i;
@@ -370,8 +373,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
370 for (i = 0; i < dwords;) { 373 for (i = 0; i < dwords;) {
371 int cmd, sz; 374 int cmd, sz;
372 375
373 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) 376 cmd = buffer[i];
374 return -EINVAL;
375 377
376 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) 378 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
377 return -EINVAL; 379 return -EINVAL;
@@ -379,11 +381,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
379 OUT_RING(cmd); 381 OUT_RING(cmd);
380 382
381 while (++i, --sz) { 383 while (++i, --sz) {
382 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], 384 OUT_RING(buffer[i]);
383 sizeof(cmd))) {
384 return -EINVAL;
385 }
386 OUT_RING(cmd);
387 } 385 }
388 } 386 }
389 387
@@ -397,17 +395,13 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
397 395
398int 396int
399i915_emit_box(struct drm_device *dev, 397i915_emit_box(struct drm_device *dev,
400 struct drm_clip_rect __user *boxes, 398 struct drm_clip_rect *boxes,
401 int i, int DR1, int DR4) 399 int i, int DR1, int DR4)
402{ 400{
403 drm_i915_private_t *dev_priv = dev->dev_private; 401 drm_i915_private_t *dev_priv = dev->dev_private;
404 struct drm_clip_rect box; 402 struct drm_clip_rect box = boxes[i];
405 RING_LOCALS; 403 RING_LOCALS;
406 404
407 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
408 return -EFAULT;
409 }
410
411 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { 405 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
412 DRM_ERROR("Bad box %d,%d..%d,%d\n", 406 DRM_ERROR("Bad box %d,%d..%d,%d\n",
413 box.x1, box.y1, box.x2, box.y2); 407 box.x1, box.y1, box.x2, box.y2);
@@ -460,7 +454,9 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
460} 454}
461 455
462static int i915_dispatch_cmdbuffer(struct drm_device * dev, 456static int i915_dispatch_cmdbuffer(struct drm_device * dev,
463 drm_i915_cmdbuffer_t * cmd) 457 drm_i915_cmdbuffer_t *cmd,
458 struct drm_clip_rect *cliprects,
459 void *cmdbuf)
464{ 460{
465 int nbox = cmd->num_cliprects; 461 int nbox = cmd->num_cliprects;
466 int i = 0, count, ret; 462 int i = 0, count, ret;
@@ -476,13 +472,13 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
476 472
477 for (i = 0; i < count; i++) { 473 for (i = 0; i < count; i++) {
478 if (i < nbox) { 474 if (i < nbox) {
479 ret = i915_emit_box(dev, cmd->cliprects, i, 475 ret = i915_emit_box(dev, cliprects, i,
480 cmd->DR1, cmd->DR4); 476 cmd->DR1, cmd->DR4);
481 if (ret) 477 if (ret)
482 return ret; 478 return ret;
483 } 479 }
484 480
485 ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4); 481 ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
486 if (ret) 482 if (ret)
487 return ret; 483 return ret;
488 } 484 }
@@ -492,10 +488,10 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
492} 488}
493 489
494static int i915_dispatch_batchbuffer(struct drm_device * dev, 490static int i915_dispatch_batchbuffer(struct drm_device * dev,
495 drm_i915_batchbuffer_t * batch) 491 drm_i915_batchbuffer_t * batch,
492 struct drm_clip_rect *cliprects)
496{ 493{
497 drm_i915_private_t *dev_priv = dev->dev_private; 494 drm_i915_private_t *dev_priv = dev->dev_private;
498 struct drm_clip_rect __user *boxes = batch->cliprects;
499 int nbox = batch->num_cliprects; 495 int nbox = batch->num_cliprects;
500 int i = 0, count; 496 int i = 0, count;
501 RING_LOCALS; 497 RING_LOCALS;
@@ -511,7 +507,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
511 507
512 for (i = 0; i < count; i++) { 508 for (i = 0; i < count; i++) {
513 if (i < nbox) { 509 if (i < nbox) {
514 int ret = i915_emit_box(dev, boxes, i, 510 int ret = i915_emit_box(dev, cliprects, i,
515 batch->DR1, batch->DR4); 511 batch->DR1, batch->DR4);
516 if (ret) 512 if (ret)
517 return ret; 513 return ret;
@@ -626,6 +622,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
626 master_priv->sarea_priv; 622 master_priv->sarea_priv;
627 drm_i915_batchbuffer_t *batch = data; 623 drm_i915_batchbuffer_t *batch = data;
628 int ret; 624 int ret;
625 struct drm_clip_rect *cliprects = NULL;
629 626
630 if (!dev_priv->allow_batchbuffer) { 627 if (!dev_priv->allow_batchbuffer) {
631 DRM_ERROR("Batchbuffer ioctl disabled\n"); 628 DRM_ERROR("Batchbuffer ioctl disabled\n");
@@ -637,17 +634,35 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
637 634
638 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 635 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
639 636
640 if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, 637 if (batch->num_cliprects < 0)
641 batch->num_cliprects * 638 return -EINVAL;
642 sizeof(struct drm_clip_rect))) 639
643 return -EFAULT; 640 if (batch->num_cliprects) {
641 cliprects = drm_calloc(batch->num_cliprects,
642 sizeof(struct drm_clip_rect),
643 DRM_MEM_DRIVER);
644 if (cliprects == NULL)
645 return -ENOMEM;
646
647 ret = copy_from_user(cliprects, batch->cliprects,
648 batch->num_cliprects *
649 sizeof(struct drm_clip_rect));
650 if (ret != 0)
651 goto fail_free;
652 }
644 653
645 mutex_lock(&dev->struct_mutex); 654 mutex_lock(&dev->struct_mutex);
646 ret = i915_dispatch_batchbuffer(dev, batch); 655 ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
647 mutex_unlock(&dev->struct_mutex); 656 mutex_unlock(&dev->struct_mutex);
648 657
649 if (sarea_priv) 658 if (sarea_priv)
650 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 659 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
660
661fail_free:
662 drm_free(cliprects,
663 batch->num_cliprects * sizeof(struct drm_clip_rect),
664 DRM_MEM_DRIVER);
665
651 return ret; 666 return ret;
652} 667}
653 668
@@ -659,6 +674,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
659 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 674 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
660 master_priv->sarea_priv; 675 master_priv->sarea_priv;
661 drm_i915_cmdbuffer_t *cmdbuf = data; 676 drm_i915_cmdbuffer_t *cmdbuf = data;
677 struct drm_clip_rect *cliprects = NULL;
678 void *batch_data;
662 int ret; 679 int ret;
663 680
664 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 681 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
@@ -666,25 +683,50 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
666 683
667 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 684 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
668 685
669 if (cmdbuf->num_cliprects && 686 if (cmdbuf->num_cliprects < 0)
670 DRM_VERIFYAREA_READ(cmdbuf->cliprects, 687 return -EINVAL;
671 cmdbuf->num_cliprects * 688
672 sizeof(struct drm_clip_rect))) { 689 batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER);
673 DRM_ERROR("Fault accessing cliprects\n"); 690 if (batch_data == NULL)
674 return -EFAULT; 691 return -ENOMEM;
692
693 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
694 if (ret != 0)
695 goto fail_batch_free;
696
697 if (cmdbuf->num_cliprects) {
698 cliprects = drm_calloc(cmdbuf->num_cliprects,
699 sizeof(struct drm_clip_rect),
700 DRM_MEM_DRIVER);
701 if (cliprects == NULL)
702 goto fail_batch_free;
703
704 ret = copy_from_user(cliprects, cmdbuf->cliprects,
705 cmdbuf->num_cliprects *
706 sizeof(struct drm_clip_rect));
707 if (ret != 0)
708 goto fail_clip_free;
675 } 709 }
676 710
677 mutex_lock(&dev->struct_mutex); 711 mutex_lock(&dev->struct_mutex);
678 ret = i915_dispatch_cmdbuffer(dev, cmdbuf); 712 ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
679 mutex_unlock(&dev->struct_mutex); 713 mutex_unlock(&dev->struct_mutex);
680 if (ret) { 714 if (ret) {
681 DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); 715 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
682 return ret; 716 goto fail_batch_free;
683 } 717 }
684 718
685 if (sarea_priv) 719 if (sarea_priv)
686 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 720 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
687 return 0; 721
722fail_batch_free:
723 drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
724fail_clip_free:
725 drm_free(cliprects,
726 cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
727 DRM_MEM_DRIVER);
728
729 return ret;
688} 730}
689 731
690static int i915_flip_bufs(struct drm_device *dev, void *data, 732static int i915_flip_bufs(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b293ef0bae71..dcb91f5df6e3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -150,8 +150,10 @@ static struct drm_driver driver = {
150 .get_reg_ofs = drm_core_get_reg_ofs, 150 .get_reg_ofs = drm_core_get_reg_ofs,
151 .master_create = i915_master_create, 151 .master_create = i915_master_create,
152 .master_destroy = i915_master_destroy, 152 .master_destroy = i915_master_destroy,
153 .proc_init = i915_gem_proc_init, 153#if defined(CONFIG_DEBUG_FS)
154 .proc_cleanup = i915_gem_proc_cleanup, 154 .debugfs_init = i915_gem_debugfs_init,
155 .debugfs_cleanup = i915_gem_debugfs_cleanup,
156#endif
155 .gem_init_object = i915_gem_init_object, 157 .gem_init_object = i915_gem_init_object,
156 .gem_free_object = i915_gem_free_object, 158 .gem_free_object = i915_gem_free_object,
157 .gem_vm_ops = &i915_gem_vm_ops, 159 .gem_vm_ops = &i915_gem_vm_ops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d6cc9861e0a1..c1685d0c704f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -404,7 +404,8 @@ struct drm_i915_gem_object {
404 /** AGP memory structure for our GTT binding. */ 404 /** AGP memory structure for our GTT binding. */
405 DRM_AGP_MEM *agp_mem; 405 DRM_AGP_MEM *agp_mem;
406 406
407 struct page **page_list; 407 struct page **pages;
408 int pages_refcount;
408 409
409 /** 410 /**
410 * Current offset of the object in GTT space. 411 * Current offset of the object in GTT space.
@@ -519,7 +520,7 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
519extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 520extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
520 unsigned long arg); 521 unsigned long arg);
521extern int i915_emit_box(struct drm_device *dev, 522extern int i915_emit_box(struct drm_device *dev,
522 struct drm_clip_rect __user *boxes, 523 struct drm_clip_rect *boxes,
523 int i, int DR1, int DR4); 524 int i, int DR1, int DR4);
524 525
525/* i915_irq.c */ 526/* i915_irq.c */
@@ -604,8 +605,6 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
604int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 605int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
605 struct drm_file *file_priv); 606 struct drm_file *file_priv);
606void i915_gem_load(struct drm_device *dev); 607void i915_gem_load(struct drm_device *dev);
607int i915_gem_proc_init(struct drm_minor *minor);
608void i915_gem_proc_cleanup(struct drm_minor *minor);
609int i915_gem_init_object(struct drm_gem_object *obj); 608int i915_gem_init_object(struct drm_gem_object *obj);
610void i915_gem_free_object(struct drm_gem_object *obj); 609void i915_gem_free_object(struct drm_gem_object *obj);
611int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); 610int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
@@ -649,6 +648,10 @@ void i915_gem_dump_object(struct drm_gem_object *obj, int len,
649 const char *where, uint32_t mark); 648 const char *where, uint32_t mark);
650void i915_dump_lru(struct drm_device *dev, const char *where); 649void i915_dump_lru(struct drm_device *dev, const char *where);
651 650
651/* i915_debugfs.c */
652int i915_gem_debugfs_init(struct drm_minor *minor);
653void i915_gem_debugfs_cleanup(struct drm_minor *minor);
654
652/* i915_suspend.c */ 655/* i915_suspend.c */
653extern int i915_save_state(struct drm_device *dev); 656extern int i915_save_state(struct drm_device *dev);
654extern int i915_restore_state(struct drm_device *dev); 657extern int i915_restore_state(struct drm_device *dev);
@@ -784,15 +787,21 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
784 (dev)->pci_device == 0x2E22 || \ 787 (dev)->pci_device == 0x2E22 || \
785 IS_GM45(dev)) 788 IS_GM45(dev))
786 789
790#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
791#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
792#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
793
787#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ 794#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
788 (dev)->pci_device == 0x29B2 || \ 795 (dev)->pci_device == 0x29B2 || \
789 (dev)->pci_device == 0x29D2) 796 (dev)->pci_device == 0x29D2 || \
797 (IS_IGD(dev)))
790 798
791#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ 799#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
792 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) 800 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
793 801
794#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ 802#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
795 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) 803 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
804 IS_IGD(dev))
796 805
797#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) 806#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
798/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 807/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 37427e4016cb..b52cba0f16d2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,8 +43,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
43 uint64_t offset, 43 uint64_t offset,
44 uint64_t size); 44 uint64_t size);
45static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); 45static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
46static int i915_gem_object_get_page_list(struct drm_gem_object *obj); 46static int i915_gem_object_get_pages(struct drm_gem_object *obj);
47static void i915_gem_object_free_page_list(struct drm_gem_object *obj); 47static void i915_gem_object_put_pages(struct drm_gem_object *obj);
48static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 48static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
49static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 49static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
50 unsigned alignment); 50 unsigned alignment);
@@ -136,6 +136,224 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
136 return 0; 136 return 0;
137} 137}
138 138
139static inline int
140fast_shmem_read(struct page **pages,
141 loff_t page_base, int page_offset,
142 char __user *data,
143 int length)
144{
145 char __iomem *vaddr;
146 int ret;
147
148 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
149 if (vaddr == NULL)
150 return -ENOMEM;
151 ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
152 kunmap_atomic(vaddr, KM_USER0);
153
154 return ret;
155}
156
157static inline int
158slow_shmem_copy(struct page *dst_page,
159 int dst_offset,
160 struct page *src_page,
161 int src_offset,
162 int length)
163{
164 char *dst_vaddr, *src_vaddr;
165
166 dst_vaddr = kmap_atomic(dst_page, KM_USER0);
167 if (dst_vaddr == NULL)
168 return -ENOMEM;
169
170 src_vaddr = kmap_atomic(src_page, KM_USER1);
171 if (src_vaddr == NULL) {
172 kunmap_atomic(dst_vaddr, KM_USER0);
173 return -ENOMEM;
174 }
175
176 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
177
178 kunmap_atomic(src_vaddr, KM_USER1);
179 kunmap_atomic(dst_vaddr, KM_USER0);
180
181 return 0;
182}
183
184/**
185 * This is the fast shmem pread path, which attempts to copy_from_user directly
186 * from the backing pages of the object to the user's address space. On a
187 * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
188 */
189static int
190i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
191 struct drm_i915_gem_pread *args,
192 struct drm_file *file_priv)
193{
194 struct drm_i915_gem_object *obj_priv = obj->driver_private;
195 ssize_t remain;
196 loff_t offset, page_base;
197 char __user *user_data;
198 int page_offset, page_length;
199 int ret;
200
201 user_data = (char __user *) (uintptr_t) args->data_ptr;
202 remain = args->size;
203
204 mutex_lock(&dev->struct_mutex);
205
206 ret = i915_gem_object_get_pages(obj);
207 if (ret != 0)
208 goto fail_unlock;
209
210 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
211 args->size);
212 if (ret != 0)
213 goto fail_put_pages;
214
215 obj_priv = obj->driver_private;
216 offset = args->offset;
217
218 while (remain > 0) {
219 /* Operation in this page
220 *
221 * page_base = page offset within aperture
222 * page_offset = offset within page
223 * page_length = bytes to copy for this page
224 */
225 page_base = (offset & ~(PAGE_SIZE-1));
226 page_offset = offset & (PAGE_SIZE-1);
227 page_length = remain;
228 if ((page_offset + remain) > PAGE_SIZE)
229 page_length = PAGE_SIZE - page_offset;
230
231 ret = fast_shmem_read(obj_priv->pages,
232 page_base, page_offset,
233 user_data, page_length);
234 if (ret)
235 goto fail_put_pages;
236
237 remain -= page_length;
238 user_data += page_length;
239 offset += page_length;
240 }
241
242fail_put_pages:
243 i915_gem_object_put_pages(obj);
244fail_unlock:
245 mutex_unlock(&dev->struct_mutex);
246
247 return ret;
248}
249
250/**
251 * This is the fallback shmem pread path, which allocates temporary storage
252 * in kernel space to copy_to_user into outside of the struct_mutex, so we
253 * can copy out of the object's backing pages while holding the struct mutex
254 * and not take page faults.
255 */
256static int
257i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
258 struct drm_i915_gem_pread *args,
259 struct drm_file *file_priv)
260{
261 struct drm_i915_gem_object *obj_priv = obj->driver_private;
262 struct mm_struct *mm = current->mm;
263 struct page **user_pages;
264 ssize_t remain;
265 loff_t offset, pinned_pages, i;
266 loff_t first_data_page, last_data_page, num_pages;
267 int shmem_page_index, shmem_page_offset;
268 int data_page_index, data_page_offset;
269 int page_length;
270 int ret;
271 uint64_t data_ptr = args->data_ptr;
272
273 remain = args->size;
274
275 /* Pin the user pages containing the data. We can't fault while
276 * holding the struct mutex, yet we want to hold it while
277 * dereferencing the user data.
278 */
279 first_data_page = data_ptr / PAGE_SIZE;
280 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
281 num_pages = last_data_page - first_data_page + 1;
282
283 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
284 if (user_pages == NULL)
285 return -ENOMEM;
286
287 down_read(&mm->mmap_sem);
288 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
289 num_pages, 0, 0, user_pages, NULL);
290 up_read(&mm->mmap_sem);
291 if (pinned_pages < num_pages) {
292 ret = -EFAULT;
293 goto fail_put_user_pages;
294 }
295
296 mutex_lock(&dev->struct_mutex);
297
298 ret = i915_gem_object_get_pages(obj);
299 if (ret != 0)
300 goto fail_unlock;
301
302 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
303 args->size);
304 if (ret != 0)
305 goto fail_put_pages;
306
307 obj_priv = obj->driver_private;
308 offset = args->offset;
309
310 while (remain > 0) {
311 /* Operation in this page
312 *
313 * shmem_page_index = page number within shmem file
314 * shmem_page_offset = offset within page in shmem file
315 * data_page_index = page number in get_user_pages return
316 * data_page_offset = offset with data_page_index page.
317 * page_length = bytes to copy for this page
318 */
319 shmem_page_index = offset / PAGE_SIZE;
320 shmem_page_offset = offset & ~PAGE_MASK;
321 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
322 data_page_offset = data_ptr & ~PAGE_MASK;
323
324 page_length = remain;
325 if ((shmem_page_offset + page_length) > PAGE_SIZE)
326 page_length = PAGE_SIZE - shmem_page_offset;
327 if ((data_page_offset + page_length) > PAGE_SIZE)
328 page_length = PAGE_SIZE - data_page_offset;
329
330 ret = slow_shmem_copy(user_pages[data_page_index],
331 data_page_offset,
332 obj_priv->pages[shmem_page_index],
333 shmem_page_offset,
334 page_length);
335 if (ret)
336 goto fail_put_pages;
337
338 remain -= page_length;
339 data_ptr += page_length;
340 offset += page_length;
341 }
342
343fail_put_pages:
344 i915_gem_object_put_pages(obj);
345fail_unlock:
346 mutex_unlock(&dev->struct_mutex);
347fail_put_user_pages:
348 for (i = 0; i < pinned_pages; i++) {
349 SetPageDirty(user_pages[i]);
350 page_cache_release(user_pages[i]);
351 }
352 kfree(user_pages);
353
354 return ret;
355}
356
139/** 357/**
140 * Reads data from the object referenced by handle. 358 * Reads data from the object referenced by handle.
141 * 359 *
@@ -148,8 +366,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
148 struct drm_i915_gem_pread *args = data; 366 struct drm_i915_gem_pread *args = data;
149 struct drm_gem_object *obj; 367 struct drm_gem_object *obj;
150 struct drm_i915_gem_object *obj_priv; 368 struct drm_i915_gem_object *obj_priv;
151 ssize_t read;
152 loff_t offset;
153 int ret; 369 int ret;
154 370
155 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 371 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
@@ -167,33 +383,13 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
167 return -EINVAL; 383 return -EINVAL;
168 } 384 }
169 385
170 mutex_lock(&dev->struct_mutex); 386 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
171 387 if (ret != 0)
172 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, 388 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
173 args->size);
174 if (ret != 0) {
175 drm_gem_object_unreference(obj);
176 mutex_unlock(&dev->struct_mutex);
177 return ret;
178 }
179
180 offset = args->offset;
181
182 read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
183 args->size, &offset);
184 if (read != args->size) {
185 drm_gem_object_unreference(obj);
186 mutex_unlock(&dev->struct_mutex);
187 if (read < 0)
188 return read;
189 else
190 return -EINVAL;
191 }
192 389
193 drm_gem_object_unreference(obj); 390 drm_gem_object_unreference(obj);
194 mutex_unlock(&dev->struct_mutex);
195 391
196 return 0; 392 return ret;
197} 393}
198 394
199/* This is the fast write path which cannot handle 395/* This is the fast write path which cannot handle
@@ -223,29 +419,51 @@ fast_user_write(struct io_mapping *mapping,
223 */ 419 */
224 420
225static inline int 421static inline int
226slow_user_write(struct io_mapping *mapping, 422slow_kernel_write(struct io_mapping *mapping,
227 loff_t page_base, int page_offset, 423 loff_t gtt_base, int gtt_offset,
228 char __user *user_data, 424 struct page *user_page, int user_offset,
229 int length) 425 int length)
230{ 426{
231 char __iomem *vaddr; 427 char *src_vaddr, *dst_vaddr;
232 unsigned long unwritten; 428 unsigned long unwritten;
233 429
234 vaddr = io_mapping_map_wc(mapping, page_base); 430 dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
235 if (vaddr == NULL) 431 src_vaddr = kmap_atomic(user_page, KM_USER1);
236 return -EFAULT; 432 unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
237 unwritten = __copy_from_user(vaddr + page_offset, 433 src_vaddr + user_offset,
238 user_data, length); 434 length);
239 io_mapping_unmap(vaddr); 435 kunmap_atomic(src_vaddr, KM_USER1);
436 io_mapping_unmap_atomic(dst_vaddr);
240 if (unwritten) 437 if (unwritten)
241 return -EFAULT; 438 return -EFAULT;
242 return 0; 439 return 0;
243} 440}
244 441
442static inline int
443fast_shmem_write(struct page **pages,
444 loff_t page_base, int page_offset,
445 char __user *data,
446 int length)
447{
448 char __iomem *vaddr;
449
450 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
451 if (vaddr == NULL)
452 return -ENOMEM;
453 __copy_from_user_inatomic(vaddr + page_offset, data, length);
454 kunmap_atomic(vaddr, KM_USER0);
455
456 return 0;
457}
458
459/**
460 * This is the fast pwrite path, where we copy the data directly from the
461 * user into the GTT, uncached.
462 */
245static int 463static int
246i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 464i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
247 struct drm_i915_gem_pwrite *args, 465 struct drm_i915_gem_pwrite *args,
248 struct drm_file *file_priv) 466 struct drm_file *file_priv)
249{ 467{
250 struct drm_i915_gem_object *obj_priv = obj->driver_private; 468 struct drm_i915_gem_object *obj_priv = obj->driver_private;
251 drm_i915_private_t *dev_priv = dev->dev_private; 469 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -273,7 +491,6 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
273 491
274 obj_priv = obj->driver_private; 492 obj_priv = obj->driver_private;
275 offset = obj_priv->gtt_offset + args->offset; 493 offset = obj_priv->gtt_offset + args->offset;
276 obj_priv->dirty = 1;
277 494
278 while (remain > 0) { 495 while (remain > 0) {
279 /* Operation in this page 496 /* Operation in this page
@@ -292,16 +509,11 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
292 page_offset, user_data, page_length); 509 page_offset, user_data, page_length);
293 510
294 /* If we get a fault while copying data, then (presumably) our 511 /* If we get a fault while copying data, then (presumably) our
295 * source page isn't available. In this case, use the 512 * source page isn't available. Return the error and we'll
296 * non-atomic function 513 * retry in the slow path.
297 */ 514 */
298 if (ret) { 515 if (ret)
299 ret = slow_user_write (dev_priv->mm.gtt_mapping, 516 goto fail;
300 page_base, page_offset,
301 user_data, page_length);
302 if (ret)
303 goto fail;
304 }
305 517
306 remain -= page_length; 518 remain -= page_length;
307 user_data += page_length; 519 user_data += page_length;
@@ -315,39 +527,284 @@ fail:
315 return ret; 527 return ret;
316} 528}
317 529
530/**
531 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
532 * the memory and maps it using kmap_atomic for copying.
533 *
534 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
535 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
536 */
318static int 537static int
319i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 538i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
320 struct drm_i915_gem_pwrite *args, 539 struct drm_i915_gem_pwrite *args,
321 struct drm_file *file_priv) 540 struct drm_file *file_priv)
322{ 541{
542 struct drm_i915_gem_object *obj_priv = obj->driver_private;
543 drm_i915_private_t *dev_priv = dev->dev_private;
544 ssize_t remain;
545 loff_t gtt_page_base, offset;
546 loff_t first_data_page, last_data_page, num_pages;
547 loff_t pinned_pages, i;
548 struct page **user_pages;
549 struct mm_struct *mm = current->mm;
550 int gtt_page_offset, data_page_offset, data_page_index, page_length;
323 int ret; 551 int ret;
324 loff_t offset; 552 uint64_t data_ptr = args->data_ptr;
325 ssize_t written; 553
554 remain = args->size;
555
556 /* Pin the user pages containing the data. We can't fault while
557 * holding the struct mutex, and all of the pwrite implementations
558 * want to hold it while dereferencing the user data.
559 */
560 first_data_page = data_ptr / PAGE_SIZE;
561 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
562 num_pages = last_data_page - first_data_page + 1;
563
564 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
565 if (user_pages == NULL)
566 return -ENOMEM;
567
568 down_read(&mm->mmap_sem);
569 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
570 num_pages, 0, 0, user_pages, NULL);
571 up_read(&mm->mmap_sem);
572 if (pinned_pages < num_pages) {
573 ret = -EFAULT;
574 goto out_unpin_pages;
575 }
326 576
327 mutex_lock(&dev->struct_mutex); 577 mutex_lock(&dev->struct_mutex);
578 ret = i915_gem_object_pin(obj, 0);
579 if (ret)
580 goto out_unlock;
581
582 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
583 if (ret)
584 goto out_unpin_object;
585
586 obj_priv = obj->driver_private;
587 offset = obj_priv->gtt_offset + args->offset;
588
589 while (remain > 0) {
590 /* Operation in this page
591 *
592 * gtt_page_base = page offset within aperture
593 * gtt_page_offset = offset within page in aperture
594 * data_page_index = page number in get_user_pages return
595 * data_page_offset = offset with data_page_index page.
596 * page_length = bytes to copy for this page
597 */
598 gtt_page_base = offset & PAGE_MASK;
599 gtt_page_offset = offset & ~PAGE_MASK;
600 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
601 data_page_offset = data_ptr & ~PAGE_MASK;
602
603 page_length = remain;
604 if ((gtt_page_offset + page_length) > PAGE_SIZE)
605 page_length = PAGE_SIZE - gtt_page_offset;
606 if ((data_page_offset + page_length) > PAGE_SIZE)
607 page_length = PAGE_SIZE - data_page_offset;
608
609 ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
610 gtt_page_base, gtt_page_offset,
611 user_pages[data_page_index],
612 data_page_offset,
613 page_length);
614
615 /* If we get a fault while copying data, then (presumably) our
616 * source page isn't available. Return the error and we'll
617 * retry in the slow path.
618 */
619 if (ret)
620 goto out_unpin_object;
621
622 remain -= page_length;
623 offset += page_length;
624 data_ptr += page_length;
625 }
626
627out_unpin_object:
628 i915_gem_object_unpin(obj);
629out_unlock:
630 mutex_unlock(&dev->struct_mutex);
631out_unpin_pages:
632 for (i = 0; i < pinned_pages; i++)
633 page_cache_release(user_pages[i]);
634 kfree(user_pages);
635
636 return ret;
637}
638
639/**
640 * This is the fast shmem pwrite path, which attempts to directly
641 * copy_from_user into the kmapped pages backing the object.
642 */
643static int
644i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
645 struct drm_i915_gem_pwrite *args,
646 struct drm_file *file_priv)
647{
648 struct drm_i915_gem_object *obj_priv = obj->driver_private;
649 ssize_t remain;
650 loff_t offset, page_base;
651 char __user *user_data;
652 int page_offset, page_length;
653 int ret;
654
655 user_data = (char __user *) (uintptr_t) args->data_ptr;
656 remain = args->size;
657
658 mutex_lock(&dev->struct_mutex);
659
660 ret = i915_gem_object_get_pages(obj);
661 if (ret != 0)
662 goto fail_unlock;
328 663
329 ret = i915_gem_object_set_to_cpu_domain(obj, 1); 664 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
330 if (ret) { 665 if (ret != 0)
331 mutex_unlock(&dev->struct_mutex); 666 goto fail_put_pages;
332 return ret; 667
668 obj_priv = obj->driver_private;
669 offset = args->offset;
670 obj_priv->dirty = 1;
671
672 while (remain > 0) {
673 /* Operation in this page
674 *
675 * page_base = page offset within aperture
676 * page_offset = offset within page
677 * page_length = bytes to copy for this page
678 */
679 page_base = (offset & ~(PAGE_SIZE-1));
680 page_offset = offset & (PAGE_SIZE-1);
681 page_length = remain;
682 if ((page_offset + remain) > PAGE_SIZE)
683 page_length = PAGE_SIZE - page_offset;
684
685 ret = fast_shmem_write(obj_priv->pages,
686 page_base, page_offset,
687 user_data, page_length);
688 if (ret)
689 goto fail_put_pages;
690
691 remain -= page_length;
692 user_data += page_length;
693 offset += page_length;
333 } 694 }
334 695
696fail_put_pages:
697 i915_gem_object_put_pages(obj);
698fail_unlock:
699 mutex_unlock(&dev->struct_mutex);
700
701 return ret;
702}
703
704/**
705 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
706 * the memory and maps it using kmap_atomic for copying.
707 *
708 * This avoids taking mmap_sem for faulting on the user's address while the
709 * struct_mutex is held.
710 */
711static int
712i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
713 struct drm_i915_gem_pwrite *args,
714 struct drm_file *file_priv)
715{
716 struct drm_i915_gem_object *obj_priv = obj->driver_private;
717 struct mm_struct *mm = current->mm;
718 struct page **user_pages;
719 ssize_t remain;
720 loff_t offset, pinned_pages, i;
721 loff_t first_data_page, last_data_page, num_pages;
722 int shmem_page_index, shmem_page_offset;
723 int data_page_index, data_page_offset;
724 int page_length;
725 int ret;
726 uint64_t data_ptr = args->data_ptr;
727
728 remain = args->size;
729
730 /* Pin the user pages containing the data. We can't fault while
731 * holding the struct mutex, and all of the pwrite implementations
732 * want to hold it while dereferencing the user data.
733 */
734 first_data_page = data_ptr / PAGE_SIZE;
735 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
736 num_pages = last_data_page - first_data_page + 1;
737
738 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
739 if (user_pages == NULL)
740 return -ENOMEM;
741
742 down_read(&mm->mmap_sem);
743 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
744 num_pages, 0, 0, user_pages, NULL);
745 up_read(&mm->mmap_sem);
746 if (pinned_pages < num_pages) {
747 ret = -EFAULT;
748 goto fail_put_user_pages;
749 }
750
751 mutex_lock(&dev->struct_mutex);
752
753 ret = i915_gem_object_get_pages(obj);
754 if (ret != 0)
755 goto fail_unlock;
756
757 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
758 if (ret != 0)
759 goto fail_put_pages;
760
761 obj_priv = obj->driver_private;
335 offset = args->offset; 762 offset = args->offset;
763 obj_priv->dirty = 1;
336 764
337 written = vfs_write(obj->filp, 765 while (remain > 0) {
338 (char __user *)(uintptr_t) args->data_ptr, 766 /* Operation in this page
339 args->size, &offset); 767 *
340 if (written != args->size) { 768 * shmem_page_index = page number within shmem file
341 mutex_unlock(&dev->struct_mutex); 769 * shmem_page_offset = offset within page in shmem file
342 if (written < 0) 770 * data_page_index = page number in get_user_pages return
343 return written; 771 * data_page_offset = offset with data_page_index page.
344 else 772 * page_length = bytes to copy for this page
345 return -EINVAL; 773 */
774 shmem_page_index = offset / PAGE_SIZE;
775 shmem_page_offset = offset & ~PAGE_MASK;
776 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
777 data_page_offset = data_ptr & ~PAGE_MASK;
778
779 page_length = remain;
780 if ((shmem_page_offset + page_length) > PAGE_SIZE)
781 page_length = PAGE_SIZE - shmem_page_offset;
782 if ((data_page_offset + page_length) > PAGE_SIZE)
783 page_length = PAGE_SIZE - data_page_offset;
784
785 ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
786 shmem_page_offset,
787 user_pages[data_page_index],
788 data_page_offset,
789 page_length);
790 if (ret)
791 goto fail_put_pages;
792
793 remain -= page_length;
794 data_ptr += page_length;
795 offset += page_length;
346 } 796 }
347 797
798fail_put_pages:
799 i915_gem_object_put_pages(obj);
800fail_unlock:
348 mutex_unlock(&dev->struct_mutex); 801 mutex_unlock(&dev->struct_mutex);
802fail_put_user_pages:
803 for (i = 0; i < pinned_pages; i++)
804 page_cache_release(user_pages[i]);
805 kfree(user_pages);
349 806
350 return 0; 807 return ret;
351} 808}
352 809
353/** 810/**
@@ -388,10 +845,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
388 if (obj_priv->phys_obj) 845 if (obj_priv->phys_obj)
389 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); 846 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
390 else if (obj_priv->tiling_mode == I915_TILING_NONE && 847 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
391 dev->gtt_total != 0) 848 dev->gtt_total != 0) {
392 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); 849 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
393 else 850 if (ret == -EFAULT) {
394 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); 851 ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
852 file_priv);
853 }
854 } else {
855 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
856 if (ret == -EFAULT) {
857 ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
858 file_priv);
859 }
860 }
395 861
396#if WATCH_PWRITE 862#if WATCH_PWRITE
397 if (ret) 863 if (ret)
@@ -816,29 +1282,30 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
816} 1282}
817 1283
818static void 1284static void
819i915_gem_object_free_page_list(struct drm_gem_object *obj) 1285i915_gem_object_put_pages(struct drm_gem_object *obj)
820{ 1286{
821 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1287 struct drm_i915_gem_object *obj_priv = obj->driver_private;
822 int page_count = obj->size / PAGE_SIZE; 1288 int page_count = obj->size / PAGE_SIZE;
823 int i; 1289 int i;
824 1290
825 if (obj_priv->page_list == NULL) 1291 BUG_ON(obj_priv->pages_refcount == 0);
826 return;
827 1292
1293 if (--obj_priv->pages_refcount != 0)
1294 return;
828 1295
829 for (i = 0; i < page_count; i++) 1296 for (i = 0; i < page_count; i++)
830 if (obj_priv->page_list[i] != NULL) { 1297 if (obj_priv->pages[i] != NULL) {
831 if (obj_priv->dirty) 1298 if (obj_priv->dirty)
832 set_page_dirty(obj_priv->page_list[i]); 1299 set_page_dirty(obj_priv->pages[i]);
833 mark_page_accessed(obj_priv->page_list[i]); 1300 mark_page_accessed(obj_priv->pages[i]);
834 page_cache_release(obj_priv->page_list[i]); 1301 page_cache_release(obj_priv->pages[i]);
835 } 1302 }
836 obj_priv->dirty = 0; 1303 obj_priv->dirty = 0;
837 1304
838 drm_free(obj_priv->page_list, 1305 drm_free(obj_priv->pages,
839 page_count * sizeof(struct page *), 1306 page_count * sizeof(struct page *),
840 DRM_MEM_DRIVER); 1307 DRM_MEM_DRIVER);
841 obj_priv->page_list = NULL; 1308 obj_priv->pages = NULL;
842} 1309}
843 1310
844static void 1311static void
@@ -1290,7 +1757,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1290 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 1757 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1291 i915_gem_clear_fence_reg(obj); 1758 i915_gem_clear_fence_reg(obj);
1292 1759
1293 i915_gem_object_free_page_list(obj); 1760 i915_gem_object_put_pages(obj);
1294 1761
1295 if (obj_priv->gtt_space) { 1762 if (obj_priv->gtt_space) {
1296 atomic_dec(&dev->gtt_count); 1763 atomic_dec(&dev->gtt_count);
@@ -1409,7 +1876,7 @@ i915_gem_evict_everything(struct drm_device *dev)
1409} 1876}
1410 1877
1411static int 1878static int
1412i915_gem_object_get_page_list(struct drm_gem_object *obj) 1879i915_gem_object_get_pages(struct drm_gem_object *obj)
1413{ 1880{
1414 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1881 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1415 int page_count, i; 1882 int page_count, i;
@@ -1418,18 +1885,19 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
1418 struct page *page; 1885 struct page *page;
1419 int ret; 1886 int ret;
1420 1887
1421 if (obj_priv->page_list) 1888 if (obj_priv->pages_refcount++ != 0)
1422 return 0; 1889 return 0;
1423 1890
1424 /* Get the list of pages out of our struct file. They'll be pinned 1891 /* Get the list of pages out of our struct file. They'll be pinned
1425 * at this point until we release them. 1892 * at this point until we release them.
1426 */ 1893 */
1427 page_count = obj->size / PAGE_SIZE; 1894 page_count = obj->size / PAGE_SIZE;
1428 BUG_ON(obj_priv->page_list != NULL); 1895 BUG_ON(obj_priv->pages != NULL);
1429 obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *), 1896 obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
1430 DRM_MEM_DRIVER); 1897 DRM_MEM_DRIVER);
1431 if (obj_priv->page_list == NULL) { 1898 if (obj_priv->pages == NULL) {
1432 DRM_ERROR("Faled to allocate page list\n"); 1899 DRM_ERROR("Faled to allocate page list\n");
1900 obj_priv->pages_refcount--;
1433 return -ENOMEM; 1901 return -ENOMEM;
1434 } 1902 }
1435 1903
@@ -1440,10 +1908,10 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
1440 if (IS_ERR(page)) { 1908 if (IS_ERR(page)) {
1441 ret = PTR_ERR(page); 1909 ret = PTR_ERR(page);
1442 DRM_ERROR("read_mapping_page failed: %d\n", ret); 1910 DRM_ERROR("read_mapping_page failed: %d\n", ret);
1443 i915_gem_object_free_page_list(obj); 1911 i915_gem_object_put_pages(obj);
1444 return ret; 1912 return ret;
1445 } 1913 }
1446 obj_priv->page_list[i] = page; 1914 obj_priv->pages[i] = page;
1447 } 1915 }
1448 return 0; 1916 return 0;
1449} 1917}
@@ -1766,7 +2234,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1766 DRM_INFO("Binding object of size %d at 0x%08x\n", 2234 DRM_INFO("Binding object of size %d at 0x%08x\n",
1767 obj->size, obj_priv->gtt_offset); 2235 obj->size, obj_priv->gtt_offset);
1768#endif 2236#endif
1769 ret = i915_gem_object_get_page_list(obj); 2237 ret = i915_gem_object_get_pages(obj);
1770 if (ret) { 2238 if (ret) {
1771 drm_mm_put_block(obj_priv->gtt_space); 2239 drm_mm_put_block(obj_priv->gtt_space);
1772 obj_priv->gtt_space = NULL; 2240 obj_priv->gtt_space = NULL;
@@ -1778,12 +2246,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1778 * into the GTT. 2246 * into the GTT.
1779 */ 2247 */
1780 obj_priv->agp_mem = drm_agp_bind_pages(dev, 2248 obj_priv->agp_mem = drm_agp_bind_pages(dev,
1781 obj_priv->page_list, 2249 obj_priv->pages,
1782 page_count, 2250 page_count,
1783 obj_priv->gtt_offset, 2251 obj_priv->gtt_offset,
1784 obj_priv->agp_type); 2252 obj_priv->agp_type);
1785 if (obj_priv->agp_mem == NULL) { 2253 if (obj_priv->agp_mem == NULL) {
1786 i915_gem_object_free_page_list(obj); 2254 i915_gem_object_put_pages(obj);
1787 drm_mm_put_block(obj_priv->gtt_space); 2255 drm_mm_put_block(obj_priv->gtt_space);
1788 obj_priv->gtt_space = NULL; 2256 obj_priv->gtt_space = NULL;
1789 return -ENOMEM; 2257 return -ENOMEM;
@@ -1810,10 +2278,10 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
1810 * to GPU, and we can ignore the cache flush because it'll happen 2278 * to GPU, and we can ignore the cache flush because it'll happen
1811 * again at bind time. 2279 * again at bind time.
1812 */ 2280 */
1813 if (obj_priv->page_list == NULL) 2281 if (obj_priv->pages == NULL)
1814 return; 2282 return;
1815 2283
1816 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); 2284 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
1817} 2285}
1818 2286
1819/** Flushes any GPU write domain for the object if it's dirty. */ 2287/** Flushes any GPU write domain for the object if it's dirty. */
@@ -1913,7 +2381,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
1913static int 2381static int
1914i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) 2382i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
1915{ 2383{
1916 struct drm_device *dev = obj->dev;
1917 int ret; 2384 int ret;
1918 2385
1919 i915_gem_object_flush_gpu_write_domain(obj); 2386 i915_gem_object_flush_gpu_write_domain(obj);
@@ -1932,7 +2399,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
1932 /* Flush the CPU cache if it's still invalid. */ 2399 /* Flush the CPU cache if it's still invalid. */
1933 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { 2400 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
1934 i915_gem_clflush_object(obj); 2401 i915_gem_clflush_object(obj);
1935 drm_agp_chipset_flush(dev);
1936 2402
1937 obj->read_domains |= I915_GEM_DOMAIN_CPU; 2403 obj->read_domains |= I915_GEM_DOMAIN_CPU;
1938 } 2404 }
@@ -2144,7 +2610,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2144static void 2610static void
2145i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) 2611i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2146{ 2612{
2147 struct drm_device *dev = obj->dev;
2148 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2613 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2149 2614
2150 if (!obj_priv->page_cpu_valid) 2615 if (!obj_priv->page_cpu_valid)
@@ -2158,9 +2623,8 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2158 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { 2623 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
2159 if (obj_priv->page_cpu_valid[i]) 2624 if (obj_priv->page_cpu_valid[i])
2160 continue; 2625 continue;
2161 drm_clflush_pages(obj_priv->page_list + i, 1); 2626 drm_clflush_pages(obj_priv->pages + i, 1);
2162 } 2627 }
2163 drm_agp_chipset_flush(dev);
2164 } 2628 }
2165 2629
2166 /* Free the page_cpu_valid mappings which are now stale, whether 2630 /* Free the page_cpu_valid mappings which are now stale, whether
@@ -2224,7 +2688,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2224 if (obj_priv->page_cpu_valid[i]) 2688 if (obj_priv->page_cpu_valid[i])
2225 continue; 2689 continue;
2226 2690
2227 drm_clflush_pages(obj_priv->page_list + i, 1); 2691 drm_clflush_pages(obj_priv->pages + i, 1);
2228 2692
2229 obj_priv->page_cpu_valid[i] = 1; 2693 obj_priv->page_cpu_valid[i] = 1;
2230 } 2694 }
@@ -2245,12 +2709,11 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2245static int 2709static int
2246i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, 2710i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2247 struct drm_file *file_priv, 2711 struct drm_file *file_priv,
2248 struct drm_i915_gem_exec_object *entry) 2712 struct drm_i915_gem_exec_object *entry,
2713 struct drm_i915_gem_relocation_entry *relocs)
2249{ 2714{
2250 struct drm_device *dev = obj->dev; 2715 struct drm_device *dev = obj->dev;
2251 drm_i915_private_t *dev_priv = dev->dev_private; 2716 drm_i915_private_t *dev_priv = dev->dev_private;
2252 struct drm_i915_gem_relocation_entry reloc;
2253 struct drm_i915_gem_relocation_entry __user *relocs;
2254 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2717 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2255 int i, ret; 2718 int i, ret;
2256 void __iomem *reloc_page; 2719 void __iomem *reloc_page;
@@ -2262,25 +2725,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2262 2725
2263 entry->offset = obj_priv->gtt_offset; 2726 entry->offset = obj_priv->gtt_offset;
2264 2727
2265 relocs = (struct drm_i915_gem_relocation_entry __user *)
2266 (uintptr_t) entry->relocs_ptr;
2267 /* Apply the relocations, using the GTT aperture to avoid cache 2728 /* Apply the relocations, using the GTT aperture to avoid cache
2268 * flushing requirements. 2729 * flushing requirements.
2269 */ 2730 */
2270 for (i = 0; i < entry->relocation_count; i++) { 2731 for (i = 0; i < entry->relocation_count; i++) {
2732 struct drm_i915_gem_relocation_entry *reloc= &relocs[i];
2271 struct drm_gem_object *target_obj; 2733 struct drm_gem_object *target_obj;
2272 struct drm_i915_gem_object *target_obj_priv; 2734 struct drm_i915_gem_object *target_obj_priv;
2273 uint32_t reloc_val, reloc_offset; 2735 uint32_t reloc_val, reloc_offset;
2274 uint32_t __iomem *reloc_entry; 2736 uint32_t __iomem *reloc_entry;
2275 2737
2276 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
2277 if (ret != 0) {
2278 i915_gem_object_unpin(obj);
2279 return ret;
2280 }
2281
2282 target_obj = drm_gem_object_lookup(obj->dev, file_priv, 2738 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
2283 reloc.target_handle); 2739 reloc->target_handle);
2284 if (target_obj == NULL) { 2740 if (target_obj == NULL) {
2285 i915_gem_object_unpin(obj); 2741 i915_gem_object_unpin(obj);
2286 return -EBADF; 2742 return -EBADF;
@@ -2292,53 +2748,53 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2292 */ 2748 */
2293 if (target_obj_priv->gtt_space == NULL) { 2749 if (target_obj_priv->gtt_space == NULL) {
2294 DRM_ERROR("No GTT space found for object %d\n", 2750 DRM_ERROR("No GTT space found for object %d\n",
2295 reloc.target_handle); 2751 reloc->target_handle);
2296 drm_gem_object_unreference(target_obj); 2752 drm_gem_object_unreference(target_obj);
2297 i915_gem_object_unpin(obj); 2753 i915_gem_object_unpin(obj);
2298 return -EINVAL; 2754 return -EINVAL;
2299 } 2755 }
2300 2756
2301 if (reloc.offset > obj->size - 4) { 2757 if (reloc->offset > obj->size - 4) {
2302 DRM_ERROR("Relocation beyond object bounds: " 2758 DRM_ERROR("Relocation beyond object bounds: "
2303 "obj %p target %d offset %d size %d.\n", 2759 "obj %p target %d offset %d size %d.\n",
2304 obj, reloc.target_handle, 2760 obj, reloc->target_handle,
2305 (int) reloc.offset, (int) obj->size); 2761 (int) reloc->offset, (int) obj->size);
2306 drm_gem_object_unreference(target_obj); 2762 drm_gem_object_unreference(target_obj);
2307 i915_gem_object_unpin(obj); 2763 i915_gem_object_unpin(obj);
2308 return -EINVAL; 2764 return -EINVAL;
2309 } 2765 }
2310 if (reloc.offset & 3) { 2766 if (reloc->offset & 3) {
2311 DRM_ERROR("Relocation not 4-byte aligned: " 2767 DRM_ERROR("Relocation not 4-byte aligned: "
2312 "obj %p target %d offset %d.\n", 2768 "obj %p target %d offset %d.\n",
2313 obj, reloc.target_handle, 2769 obj, reloc->target_handle,
2314 (int) reloc.offset); 2770 (int) reloc->offset);
2315 drm_gem_object_unreference(target_obj); 2771 drm_gem_object_unreference(target_obj);
2316 i915_gem_object_unpin(obj); 2772 i915_gem_object_unpin(obj);
2317 return -EINVAL; 2773 return -EINVAL;
2318 } 2774 }
2319 2775
2320 if (reloc.write_domain & I915_GEM_DOMAIN_CPU || 2776 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
2321 reloc.read_domains & I915_GEM_DOMAIN_CPU) { 2777 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
2322 DRM_ERROR("reloc with read/write CPU domains: " 2778 DRM_ERROR("reloc with read/write CPU domains: "
2323 "obj %p target %d offset %d " 2779 "obj %p target %d offset %d "
2324 "read %08x write %08x", 2780 "read %08x write %08x",
2325 obj, reloc.target_handle, 2781 obj, reloc->target_handle,
2326 (int) reloc.offset, 2782 (int) reloc->offset,
2327 reloc.read_domains, 2783 reloc->read_domains,
2328 reloc.write_domain); 2784 reloc->write_domain);
2329 drm_gem_object_unreference(target_obj); 2785 drm_gem_object_unreference(target_obj);
2330 i915_gem_object_unpin(obj); 2786 i915_gem_object_unpin(obj);
2331 return -EINVAL; 2787 return -EINVAL;
2332 } 2788 }
2333 2789
2334 if (reloc.write_domain && target_obj->pending_write_domain && 2790 if (reloc->write_domain && target_obj->pending_write_domain &&
2335 reloc.write_domain != target_obj->pending_write_domain) { 2791 reloc->write_domain != target_obj->pending_write_domain) {
2336 DRM_ERROR("Write domain conflict: " 2792 DRM_ERROR("Write domain conflict: "
2337 "obj %p target %d offset %d " 2793 "obj %p target %d offset %d "
2338 "new %08x old %08x\n", 2794 "new %08x old %08x\n",
2339 obj, reloc.target_handle, 2795 obj, reloc->target_handle,
2340 (int) reloc.offset, 2796 (int) reloc->offset,
2341 reloc.write_domain, 2797 reloc->write_domain,
2342 target_obj->pending_write_domain); 2798 target_obj->pending_write_domain);
2343 drm_gem_object_unreference(target_obj); 2799 drm_gem_object_unreference(target_obj);
2344 i915_gem_object_unpin(obj); 2800 i915_gem_object_unpin(obj);
@@ -2351,22 +2807,22 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2351 "presumed %08x delta %08x\n", 2807 "presumed %08x delta %08x\n",
2352 __func__, 2808 __func__,
2353 obj, 2809 obj,
2354 (int) reloc.offset, 2810 (int) reloc->offset,
2355 (int) reloc.target_handle, 2811 (int) reloc->target_handle,
2356 (int) reloc.read_domains, 2812 (int) reloc->read_domains,
2357 (int) reloc.write_domain, 2813 (int) reloc->write_domain,
2358 (int) target_obj_priv->gtt_offset, 2814 (int) target_obj_priv->gtt_offset,
2359 (int) reloc.presumed_offset, 2815 (int) reloc->presumed_offset,
2360 reloc.delta); 2816 reloc->delta);
2361#endif 2817#endif
2362 2818
2363 target_obj->pending_read_domains |= reloc.read_domains; 2819 target_obj->pending_read_domains |= reloc->read_domains;
2364 target_obj->pending_write_domain |= reloc.write_domain; 2820 target_obj->pending_write_domain |= reloc->write_domain;
2365 2821
2366 /* If the relocation already has the right value in it, no 2822 /* If the relocation already has the right value in it, no
2367 * more work needs to be done. 2823 * more work needs to be done.
2368 */ 2824 */
2369 if (target_obj_priv->gtt_offset == reloc.presumed_offset) { 2825 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
2370 drm_gem_object_unreference(target_obj); 2826 drm_gem_object_unreference(target_obj);
2371 continue; 2827 continue;
2372 } 2828 }
@@ -2381,32 +2837,26 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2381 /* Map the page containing the relocation we're going to 2837 /* Map the page containing the relocation we're going to
2382 * perform. 2838 * perform.
2383 */ 2839 */
2384 reloc_offset = obj_priv->gtt_offset + reloc.offset; 2840 reloc_offset = obj_priv->gtt_offset + reloc->offset;
2385 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 2841 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
2386 (reloc_offset & 2842 (reloc_offset &
2387 ~(PAGE_SIZE - 1))); 2843 ~(PAGE_SIZE - 1)));
2388 reloc_entry = (uint32_t __iomem *)(reloc_page + 2844 reloc_entry = (uint32_t __iomem *)(reloc_page +
2389 (reloc_offset & (PAGE_SIZE - 1))); 2845 (reloc_offset & (PAGE_SIZE - 1)));
2390 reloc_val = target_obj_priv->gtt_offset + reloc.delta; 2846 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
2391 2847
2392#if WATCH_BUF 2848#if WATCH_BUF
2393 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", 2849 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
2394 obj, (unsigned int) reloc.offset, 2850 obj, (unsigned int) reloc->offset,
2395 readl(reloc_entry), reloc_val); 2851 readl(reloc_entry), reloc_val);
2396#endif 2852#endif
2397 writel(reloc_val, reloc_entry); 2853 writel(reloc_val, reloc_entry);
2398 io_mapping_unmap_atomic(reloc_page); 2854 io_mapping_unmap_atomic(reloc_page);
2399 2855
2400 /* Write the updated presumed offset for this entry back out 2856 /* The updated presumed offset for this entry will be
2401 * to the user. 2857 * copied back out to the user.
2402 */ 2858 */
2403 reloc.presumed_offset = target_obj_priv->gtt_offset; 2859 reloc->presumed_offset = target_obj_priv->gtt_offset;
2404 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
2405 if (ret != 0) {
2406 drm_gem_object_unreference(target_obj);
2407 i915_gem_object_unpin(obj);
2408 return ret;
2409 }
2410 2860
2411 drm_gem_object_unreference(target_obj); 2861 drm_gem_object_unreference(target_obj);
2412 } 2862 }
@@ -2423,11 +2873,10 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2423static int 2873static int
2424i915_dispatch_gem_execbuffer(struct drm_device *dev, 2874i915_dispatch_gem_execbuffer(struct drm_device *dev,
2425 struct drm_i915_gem_execbuffer *exec, 2875 struct drm_i915_gem_execbuffer *exec,
2876 struct drm_clip_rect *cliprects,
2426 uint64_t exec_offset) 2877 uint64_t exec_offset)
2427{ 2878{
2428 drm_i915_private_t *dev_priv = dev->dev_private; 2879 drm_i915_private_t *dev_priv = dev->dev_private;
2429 struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
2430 (uintptr_t) exec->cliprects_ptr;
2431 int nbox = exec->num_cliprects; 2880 int nbox = exec->num_cliprects;
2432 int i = 0, count; 2881 int i = 0, count;
2433 uint32_t exec_start, exec_len; 2882 uint32_t exec_start, exec_len;
@@ -2448,7 +2897,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
2448 2897
2449 for (i = 0; i < count; i++) { 2898 for (i = 0; i < count; i++) {
2450 if (i < nbox) { 2899 if (i < nbox) {
2451 int ret = i915_emit_box(dev, boxes, i, 2900 int ret = i915_emit_box(dev, cliprects, i,
2452 exec->DR1, exec->DR4); 2901 exec->DR1, exec->DR4);
2453 if (ret) 2902 if (ret)
2454 return ret; 2903 return ret;
@@ -2504,6 +2953,75 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
2504 return ret; 2953 return ret;
2505} 2954}
2506 2955
2956static int
2957i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
2958 uint32_t buffer_count,
2959 struct drm_i915_gem_relocation_entry **relocs)
2960{
2961 uint32_t reloc_count = 0, reloc_index = 0, i;
2962 int ret;
2963
2964 *relocs = NULL;
2965 for (i = 0; i < buffer_count; i++) {
2966 if (reloc_count + exec_list[i].relocation_count < reloc_count)
2967 return -EINVAL;
2968 reloc_count += exec_list[i].relocation_count;
2969 }
2970
2971 *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER);
2972 if (*relocs == NULL)
2973 return -ENOMEM;
2974
2975 for (i = 0; i < buffer_count; i++) {
2976 struct drm_i915_gem_relocation_entry __user *user_relocs;
2977
2978 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
2979
2980 ret = copy_from_user(&(*relocs)[reloc_index],
2981 user_relocs,
2982 exec_list[i].relocation_count *
2983 sizeof(**relocs));
2984 if (ret != 0) {
2985 drm_free(*relocs, reloc_count * sizeof(**relocs),
2986 DRM_MEM_DRIVER);
2987 *relocs = NULL;
2988 return ret;
2989 }
2990
2991 reloc_index += exec_list[i].relocation_count;
2992 }
2993
2994 return ret;
2995}
2996
2997static int
2998i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
2999 uint32_t buffer_count,
3000 struct drm_i915_gem_relocation_entry *relocs)
3001{
3002 uint32_t reloc_count = 0, i;
3003 int ret;
3004
3005 for (i = 0; i < buffer_count; i++) {
3006 struct drm_i915_gem_relocation_entry __user *user_relocs;
3007
3008 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3009
3010 if (ret == 0) {
3011 ret = copy_to_user(user_relocs,
3012 &relocs[reloc_count],
3013 exec_list[i].relocation_count *
3014 sizeof(*relocs));
3015 }
3016
3017 reloc_count += exec_list[i].relocation_count;
3018 }
3019
3020 drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
3021
3022 return ret;
3023}
3024
2507int 3025int
2508i915_gem_execbuffer(struct drm_device *dev, void *data, 3026i915_gem_execbuffer(struct drm_device *dev, void *data,
2509 struct drm_file *file_priv) 3027 struct drm_file *file_priv)
@@ -2515,9 +3033,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2515 struct drm_gem_object **object_list = NULL; 3033 struct drm_gem_object **object_list = NULL;
2516 struct drm_gem_object *batch_obj; 3034 struct drm_gem_object *batch_obj;
2517 struct drm_i915_gem_object *obj_priv; 3035 struct drm_i915_gem_object *obj_priv;
2518 int ret, i, pinned = 0; 3036 struct drm_clip_rect *cliprects = NULL;
3037 struct drm_i915_gem_relocation_entry *relocs;
3038 int ret, ret2, i, pinned = 0;
2519 uint64_t exec_offset; 3039 uint64_t exec_offset;
2520 uint32_t seqno, flush_domains; 3040 uint32_t seqno, flush_domains, reloc_index;
2521 int pin_tries; 3041 int pin_tries;
2522 3042
2523#if WATCH_EXEC 3043#if WATCH_EXEC
@@ -2551,6 +3071,28 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2551 goto pre_mutex_err; 3071 goto pre_mutex_err;
2552 } 3072 }
2553 3073
3074 if (args->num_cliprects != 0) {
3075 cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
3076 DRM_MEM_DRIVER);
3077 if (cliprects == NULL)
3078 goto pre_mutex_err;
3079
3080 ret = copy_from_user(cliprects,
3081 (struct drm_clip_rect __user *)
3082 (uintptr_t) args->cliprects_ptr,
3083 sizeof(*cliprects) * args->num_cliprects);
3084 if (ret != 0) {
3085 DRM_ERROR("copy %d cliprects failed: %d\n",
3086 args->num_cliprects, ret);
3087 goto pre_mutex_err;
3088 }
3089 }
3090
3091 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3092 &relocs);
3093 if (ret != 0)
3094 goto pre_mutex_err;
3095
2554 mutex_lock(&dev->struct_mutex); 3096 mutex_lock(&dev->struct_mutex);
2555 3097
2556 i915_verify_inactive(dev, __FILE__, __LINE__); 3098 i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2593,15 +3135,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2593 /* Pin and relocate */ 3135 /* Pin and relocate */
2594 for (pin_tries = 0; ; pin_tries++) { 3136 for (pin_tries = 0; ; pin_tries++) {
2595 ret = 0; 3137 ret = 0;
3138 reloc_index = 0;
3139
2596 for (i = 0; i < args->buffer_count; i++) { 3140 for (i = 0; i < args->buffer_count; i++) {
2597 object_list[i]->pending_read_domains = 0; 3141 object_list[i]->pending_read_domains = 0;
2598 object_list[i]->pending_write_domain = 0; 3142 object_list[i]->pending_write_domain = 0;
2599 ret = i915_gem_object_pin_and_relocate(object_list[i], 3143 ret = i915_gem_object_pin_and_relocate(object_list[i],
2600 file_priv, 3144 file_priv,
2601 &exec_list[i]); 3145 &exec_list[i],
3146 &relocs[reloc_index]);
2602 if (ret) 3147 if (ret)
2603 break; 3148 break;
2604 pinned = i + 1; 3149 pinned = i + 1;
3150 reloc_index += exec_list[i].relocation_count;
2605 } 3151 }
2606 /* success */ 3152 /* success */
2607 if (ret == 0) 3153 if (ret == 0)
@@ -2687,7 +3233,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2687#endif 3233#endif
2688 3234
2689 /* Exec the batchbuffer */ 3235 /* Exec the batchbuffer */
2690 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); 3236 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
2691 if (ret) { 3237 if (ret) {
2692 DRM_ERROR("dispatch failed %d\n", ret); 3238 DRM_ERROR("dispatch failed %d\n", ret);
2693 goto err; 3239 goto err;
@@ -2751,11 +3297,27 @@ err:
2751 args->buffer_count, ret); 3297 args->buffer_count, ret);
2752 } 3298 }
2753 3299
3300 /* Copy the updated relocations out regardless of current error
3301 * state. Failure to update the relocs would mean that the next
3302 * time userland calls execbuf, it would do so with presumed offset
3303 * state that didn't match the actual object state.
3304 */
3305 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3306 relocs);
3307 if (ret2 != 0) {
3308 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3309
3310 if (ret == 0)
3311 ret = ret2;
3312 }
3313
2754pre_mutex_err: 3314pre_mutex_err:
2755 drm_free(object_list, sizeof(*object_list) * args->buffer_count, 3315 drm_free(object_list, sizeof(*object_list) * args->buffer_count,
2756 DRM_MEM_DRIVER); 3316 DRM_MEM_DRIVER);
2757 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, 3317 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
2758 DRM_MEM_DRIVER); 3318 DRM_MEM_DRIVER);
3319 drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
3320 DRM_MEM_DRIVER);
2759 3321
2760 return ret; 3322 return ret;
2761} 3323}
@@ -3192,7 +3754,7 @@ i915_gem_init_hws(struct drm_device *dev)
3192 3754
3193 dev_priv->status_gfx_addr = obj_priv->gtt_offset; 3755 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
3194 3756
3195 dev_priv->hw_status_page = kmap(obj_priv->page_list[0]); 3757 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
3196 if (dev_priv->hw_status_page == NULL) { 3758 if (dev_priv->hw_status_page == NULL) {
3197 DRM_ERROR("Failed to map status page.\n"); 3759 DRM_ERROR("Failed to map status page.\n");
3198 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 3760 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
@@ -3222,7 +3784,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
3222 obj = dev_priv->hws_obj; 3784 obj = dev_priv->hws_obj;
3223 obj_priv = obj->driver_private; 3785 obj_priv = obj->driver_private;
3224 3786
3225 kunmap(obj_priv->page_list[0]); 3787 kunmap(obj_priv->pages[0]);
3226 i915_gem_object_unpin(obj); 3788 i915_gem_object_unpin(obj);
3227 drm_gem_object_unreference(obj); 3789 drm_gem_object_unreference(obj);
3228 dev_priv->hws_obj = NULL; 3790 dev_priv->hws_obj = NULL;
@@ -3525,20 +4087,20 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
3525 if (!obj_priv->phys_obj) 4087 if (!obj_priv->phys_obj)
3526 return; 4088 return;
3527 4089
3528 ret = i915_gem_object_get_page_list(obj); 4090 ret = i915_gem_object_get_pages(obj);
3529 if (ret) 4091 if (ret)
3530 goto out; 4092 goto out;
3531 4093
3532 page_count = obj->size / PAGE_SIZE; 4094 page_count = obj->size / PAGE_SIZE;
3533 4095
3534 for (i = 0; i < page_count; i++) { 4096 for (i = 0; i < page_count; i++) {
3535 char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0); 4097 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
3536 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); 4098 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3537 4099
3538 memcpy(dst, src, PAGE_SIZE); 4100 memcpy(dst, src, PAGE_SIZE);
3539 kunmap_atomic(dst, KM_USER0); 4101 kunmap_atomic(dst, KM_USER0);
3540 } 4102 }
3541 drm_clflush_pages(obj_priv->page_list, page_count); 4103 drm_clflush_pages(obj_priv->pages, page_count);
3542 drm_agp_chipset_flush(dev); 4104 drm_agp_chipset_flush(dev);
3543out: 4105out:
3544 obj_priv->phys_obj->cur_obj = NULL; 4106 obj_priv->phys_obj->cur_obj = NULL;
@@ -3581,7 +4143,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
3581 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; 4143 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
3582 obj_priv->phys_obj->cur_obj = obj; 4144 obj_priv->phys_obj->cur_obj = obj;
3583 4145
3584 ret = i915_gem_object_get_page_list(obj); 4146 ret = i915_gem_object_get_pages(obj);
3585 if (ret) { 4147 if (ret) {
3586 DRM_ERROR("failed to get page list\n"); 4148 DRM_ERROR("failed to get page list\n");
3587 goto out; 4149 goto out;
@@ -3590,7 +4152,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
3590 page_count = obj->size / PAGE_SIZE; 4152 page_count = obj->size / PAGE_SIZE;
3591 4153
3592 for (i = 0; i < page_count; i++) { 4154 for (i = 0; i < page_count; i++) {
3593 char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0); 4155 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
3594 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); 4156 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3595 4157
3596 memcpy(dst, src, PAGE_SIZE); 4158 memcpy(dst, src, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
new file mode 100644
index 000000000000..455ec970b385
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -0,0 +1,257 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
29#include <linux/seq_file.h>
30#include "drmP.h"
31#include "drm.h"
32#include "i915_drm.h"
33#include "i915_drv.h"
34
35#define DRM_I915_RING_DEBUG 1
36
37
38#if defined(CONFIG_DEBUG_FS)
39
40#define ACTIVE_LIST 1
41#define FLUSHING_LIST 2
42#define INACTIVE_LIST 3
43
44static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
45{
46 if (obj_priv->user_pin_count > 0)
47 return "P";
48 else if (obj_priv->pin_count > 0)
49 return "p";
50 else
51 return " ";
52}
53
54static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
55{
56 switch (obj_priv->tiling_mode) {
57 default:
58 case I915_TILING_NONE: return " ";
59 case I915_TILING_X: return "X";
60 case I915_TILING_Y: return "Y";
61 }
62}
63
64static int i915_gem_object_list_info(struct seq_file *m, void *data)
65{
66 struct drm_info_node *node = (struct drm_info_node *) m->private;
67 uintptr_t list = (uintptr_t) node->info_ent->data;
68 struct list_head *head;
69 struct drm_device *dev = node->minor->dev;
70 drm_i915_private_t *dev_priv = dev->dev_private;
71 struct drm_i915_gem_object *obj_priv;
72
73 switch (list) {
74 case ACTIVE_LIST:
75 seq_printf(m, "Active:\n");
76 head = &dev_priv->mm.active_list;
77 break;
78 case INACTIVE_LIST:
79 seq_printf(m, "Inctive:\n");
80 head = &dev_priv->mm.inactive_list;
81 break;
82 case FLUSHING_LIST:
83 seq_printf(m, "Flushing:\n");
84 head = &dev_priv->mm.flushing_list;
85 break;
86 default:
87 DRM_INFO("Ooops, unexpected list\n");
88 return 0;
89 }
90
91 list_for_each_entry(obj_priv, head, list)
92 {
93 struct drm_gem_object *obj = obj_priv->obj;
94
95 seq_printf(m, " %p: %s %08x %08x %d",
96 obj,
97 get_pin_flag(obj_priv),
98 obj->read_domains, obj->write_domain,
99 obj_priv->last_rendering_seqno);
100
101 if (obj->name)
102 seq_printf(m, " (name: %d)", obj->name);
103 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
104 seq_printf(m, " (fence: %d\n", obj_priv->fence_reg);
105 seq_printf(m, "\n");
106 }
107 return 0;
108}
109
110static int i915_gem_request_info(struct seq_file *m, void *data)
111{
112 struct drm_info_node *node = (struct drm_info_node *) m->private;
113 struct drm_device *dev = node->minor->dev;
114 drm_i915_private_t *dev_priv = dev->dev_private;
115 struct drm_i915_gem_request *gem_request;
116
117 seq_printf(m, "Request:\n");
118 list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
119 seq_printf(m, " %d @ %d\n",
120 gem_request->seqno,
121 (int) (jiffies - gem_request->emitted_jiffies));
122 }
123 return 0;
124}
125
126static int i915_gem_seqno_info(struct seq_file *m, void *data)
127{
128 struct drm_info_node *node = (struct drm_info_node *) m->private;
129 struct drm_device *dev = node->minor->dev;
130 drm_i915_private_t *dev_priv = dev->dev_private;
131
132 if (dev_priv->hw_status_page != NULL) {
133 seq_printf(m, "Current sequence: %d\n",
134 i915_get_gem_seqno(dev));
135 } else {
136 seq_printf(m, "Current sequence: hws uninitialized\n");
137 }
138 seq_printf(m, "Waiter sequence: %d\n",
139 dev_priv->mm.waiting_gem_seqno);
140 seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
141 return 0;
142}
143
144
145static int i915_interrupt_info(struct seq_file *m, void *data)
146{
147 struct drm_info_node *node = (struct drm_info_node *) m->private;
148 struct drm_device *dev = node->minor->dev;
149 drm_i915_private_t *dev_priv = dev->dev_private;
150
151 seq_printf(m, "Interrupt enable: %08x\n",
152 I915_READ(IER));
153 seq_printf(m, "Interrupt identity: %08x\n",
154 I915_READ(IIR));
155 seq_printf(m, "Interrupt mask: %08x\n",
156 I915_READ(IMR));
157 seq_printf(m, "Pipe A stat: %08x\n",
158 I915_READ(PIPEASTAT));
159 seq_printf(m, "Pipe B stat: %08x\n",
160 I915_READ(PIPEBSTAT));
161 seq_printf(m, "Interrupts received: %d\n",
162 atomic_read(&dev_priv->irq_received));
163 if (dev_priv->hw_status_page != NULL) {
164 seq_printf(m, "Current sequence: %d\n",
165 i915_get_gem_seqno(dev));
166 } else {
167 seq_printf(m, "Current sequence: hws uninitialized\n");
168 }
169 seq_printf(m, "Waiter sequence: %d\n",
170 dev_priv->mm.waiting_gem_seqno);
171 seq_printf(m, "IRQ sequence: %d\n",
172 dev_priv->mm.irq_gem_seqno);
173 return 0;
174}
175
176static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
177{
178 struct drm_info_node *node = (struct drm_info_node *) m->private;
179 struct drm_device *dev = node->minor->dev;
180 drm_i915_private_t *dev_priv = dev->dev_private;
181 int i;
182
183 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
184 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
185 for (i = 0; i < dev_priv->num_fence_regs; i++) {
186 struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
187
188 if (obj == NULL) {
189 seq_printf(m, "Fenced object[%2d] = unused\n", i);
190 } else {
191 struct drm_i915_gem_object *obj_priv;
192
193 obj_priv = obj->driver_private;
194 seq_printf(m, "Fenced object[%2d] = %p: %s "
195 "%08x %08zx %08x %s %08x %08x %d",
196 i, obj, get_pin_flag(obj_priv),
197 obj_priv->gtt_offset,
198 obj->size, obj_priv->stride,
199 get_tiling_flag(obj_priv),
200 obj->read_domains, obj->write_domain,
201 obj_priv->last_rendering_seqno);
202 if (obj->name)
203 seq_printf(m, " (name: %d)", obj->name);
204 seq_printf(m, "\n");
205 }
206 }
207
208 return 0;
209}
210
211static int i915_hws_info(struct seq_file *m, void *data)
212{
213 struct drm_info_node *node = (struct drm_info_node *) m->private;
214 struct drm_device *dev = node->minor->dev;
215 drm_i915_private_t *dev_priv = dev->dev_private;
216 int i;
217 volatile u32 *hws;
218
219 hws = (volatile u32 *)dev_priv->hw_status_page;
220 if (hws == NULL)
221 return 0;
222
223 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
224 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
225 i * 4,
226 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
227 }
228 return 0;
229}
230
231static struct drm_info_list i915_gem_debugfs_list[] = {
232 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
233 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
234 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
235 {"i915_gem_request", i915_gem_request_info, 0},
236 {"i915_gem_seqno", i915_gem_seqno_info, 0},
237 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
238 {"i915_gem_interrupt", i915_interrupt_info, 0},
239 {"i915_gem_hws", i915_hws_info, 0},
240};
241#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
242
243int i915_gem_debugfs_init(struct drm_minor *minor)
244{
245 return drm_debugfs_create_files(i915_gem_debugfs_list,
246 I915_GEM_DEBUGFS_ENTRIES,
247 minor->debugfs_root, minor);
248}
249
250void i915_gem_debugfs_cleanup(struct drm_minor *minor)
251{
252 drm_debugfs_remove_files(i915_gem_debugfs_list,
253 I915_GEM_DEBUGFS_ENTRIES, minor);
254}
255
256#endif /* CONFIG_DEBUG_FS */
257
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
deleted file mode 100644
index 4d1b9de0cd8b..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ /dev/null
@@ -1,334 +0,0 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33
34static int i915_gem_active_info(char *buf, char **start, off_t offset,
35 int request, int *eof, void *data)
36{
37 struct drm_minor *minor = (struct drm_minor *) data;
38 struct drm_device *dev = minor->dev;
39 drm_i915_private_t *dev_priv = dev->dev_private;
40 struct drm_i915_gem_object *obj_priv;
41 int len = 0;
42
43 if (offset > DRM_PROC_LIMIT) {
44 *eof = 1;
45 return 0;
46 }
47
48 *start = &buf[offset];
49 *eof = 0;
50 DRM_PROC_PRINT("Active:\n");
51 list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
52 list)
53 {
54 struct drm_gem_object *obj = obj_priv->obj;
55 if (obj->name) {
56 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
57 obj, obj->name,
58 obj->read_domains, obj->write_domain,
59 obj_priv->last_rendering_seqno);
60 } else {
61 DRM_PROC_PRINT(" %p: %08x %08x %d\n",
62 obj,
63 obj->read_domains, obj->write_domain,
64 obj_priv->last_rendering_seqno);
65 }
66 }
67 if (len > request + offset)
68 return request;
69 *eof = 1;
70 return len - offset;
71}
72
73static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
74 int request, int *eof, void *data)
75{
76 struct drm_minor *minor = (struct drm_minor *) data;
77 struct drm_device *dev = minor->dev;
78 drm_i915_private_t *dev_priv = dev->dev_private;
79 struct drm_i915_gem_object *obj_priv;
80 int len = 0;
81
82 if (offset > DRM_PROC_LIMIT) {
83 *eof = 1;
84 return 0;
85 }
86
87 *start = &buf[offset];
88 *eof = 0;
89 DRM_PROC_PRINT("Flushing:\n");
90 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
91 list)
92 {
93 struct drm_gem_object *obj = obj_priv->obj;
94 if (obj->name) {
95 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
96 obj, obj->name,
97 obj->read_domains, obj->write_domain,
98 obj_priv->last_rendering_seqno);
99 } else {
100 DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
101 obj->read_domains, obj->write_domain,
102 obj_priv->last_rendering_seqno);
103 }
104 }
105 if (len > request + offset)
106 return request;
107 *eof = 1;
108 return len - offset;
109}
110
111static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
112 int request, int *eof, void *data)
113{
114 struct drm_minor *minor = (struct drm_minor *) data;
115 struct drm_device *dev = minor->dev;
116 drm_i915_private_t *dev_priv = dev->dev_private;
117 struct drm_i915_gem_object *obj_priv;
118 int len = 0;
119
120 if (offset > DRM_PROC_LIMIT) {
121 *eof = 1;
122 return 0;
123 }
124
125 *start = &buf[offset];
126 *eof = 0;
127 DRM_PROC_PRINT("Inactive:\n");
128 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
129 list)
130 {
131 struct drm_gem_object *obj = obj_priv->obj;
132 if (obj->name) {
133 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
134 obj, obj->name,
135 obj->read_domains, obj->write_domain,
136 obj_priv->last_rendering_seqno);
137 } else {
138 DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
139 obj->read_domains, obj->write_domain,
140 obj_priv->last_rendering_seqno);
141 }
142 }
143 if (len > request + offset)
144 return request;
145 *eof = 1;
146 return len - offset;
147}
148
149static int i915_gem_request_info(char *buf, char **start, off_t offset,
150 int request, int *eof, void *data)
151{
152 struct drm_minor *minor = (struct drm_minor *) data;
153 struct drm_device *dev = minor->dev;
154 drm_i915_private_t *dev_priv = dev->dev_private;
155 struct drm_i915_gem_request *gem_request;
156 int len = 0;
157
158 if (offset > DRM_PROC_LIMIT) {
159 *eof = 1;
160 return 0;
161 }
162
163 *start = &buf[offset];
164 *eof = 0;
165 DRM_PROC_PRINT("Request:\n");
166 list_for_each_entry(gem_request, &dev_priv->mm.request_list,
167 list)
168 {
169 DRM_PROC_PRINT(" %d @ %d\n",
170 gem_request->seqno,
171 (int) (jiffies - gem_request->emitted_jiffies));
172 }
173 if (len > request + offset)
174 return request;
175 *eof = 1;
176 return len - offset;
177}
178
179static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
180 int request, int *eof, void *data)
181{
182 struct drm_minor *minor = (struct drm_minor *) data;
183 struct drm_device *dev = minor->dev;
184 drm_i915_private_t *dev_priv = dev->dev_private;
185 int len = 0;
186
187 if (offset > DRM_PROC_LIMIT) {
188 *eof = 1;
189 return 0;
190 }
191
192 *start = &buf[offset];
193 *eof = 0;
194 if (dev_priv->hw_status_page != NULL) {
195 DRM_PROC_PRINT("Current sequence: %d\n",
196 i915_get_gem_seqno(dev));
197 } else {
198 DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
199 }
200 DRM_PROC_PRINT("Waiter sequence: %d\n",
201 dev_priv->mm.waiting_gem_seqno);
202 DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
203 if (len > request + offset)
204 return request;
205 *eof = 1;
206 return len - offset;
207}
208
209
210static int i915_interrupt_info(char *buf, char **start, off_t offset,
211 int request, int *eof, void *data)
212{
213 struct drm_minor *minor = (struct drm_minor *) data;
214 struct drm_device *dev = minor->dev;
215 drm_i915_private_t *dev_priv = dev->dev_private;
216 int len = 0;
217
218 if (offset > DRM_PROC_LIMIT) {
219 *eof = 1;
220 return 0;
221 }
222
223 *start = &buf[offset];
224 *eof = 0;
225 DRM_PROC_PRINT("Interrupt enable: %08x\n",
226 I915_READ(IER));
227 DRM_PROC_PRINT("Interrupt identity: %08x\n",
228 I915_READ(IIR));
229 DRM_PROC_PRINT("Interrupt mask: %08x\n",
230 I915_READ(IMR));
231 DRM_PROC_PRINT("Pipe A stat: %08x\n",
232 I915_READ(PIPEASTAT));
233 DRM_PROC_PRINT("Pipe B stat: %08x\n",
234 I915_READ(PIPEBSTAT));
235 DRM_PROC_PRINT("Interrupts received: %d\n",
236 atomic_read(&dev_priv->irq_received));
237 if (dev_priv->hw_status_page != NULL) {
238 DRM_PROC_PRINT("Current sequence: %d\n",
239 i915_get_gem_seqno(dev));
240 } else {
241 DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
242 }
243 DRM_PROC_PRINT("Waiter sequence: %d\n",
244 dev_priv->mm.waiting_gem_seqno);
245 DRM_PROC_PRINT("IRQ sequence: %d\n",
246 dev_priv->mm.irq_gem_seqno);
247 if (len > request + offset)
248 return request;
249 *eof = 1;
250 return len - offset;
251}
252
253static int i915_hws_info(char *buf, char **start, off_t offset,
254 int request, int *eof, void *data)
255{
256 struct drm_minor *minor = (struct drm_minor *) data;
257 struct drm_device *dev = minor->dev;
258 drm_i915_private_t *dev_priv = dev->dev_private;
259 int len = 0, i;
260 volatile u32 *hws;
261
262 if (offset > DRM_PROC_LIMIT) {
263 *eof = 1;
264 return 0;
265 }
266
267 hws = (volatile u32 *)dev_priv->hw_status_page;
268 if (hws == NULL) {
269 *eof = 1;
270 return 0;
271 }
272
273 *start = &buf[offset];
274 *eof = 0;
275 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
276 DRM_PROC_PRINT("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
277 i * 4,
278 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
279 }
280 if (len > request + offset)
281 return request;
282 *eof = 1;
283 return len - offset;
284}
285
286static struct drm_proc_list {
287 /** file name */
288 const char *name;
289 /** proc callback*/
290 int (*f) (char *, char **, off_t, int, int *, void *);
291} i915_gem_proc_list[] = {
292 {"i915_gem_active", i915_gem_active_info},
293 {"i915_gem_flushing", i915_gem_flushing_info},
294 {"i915_gem_inactive", i915_gem_inactive_info},
295 {"i915_gem_request", i915_gem_request_info},
296 {"i915_gem_seqno", i915_gem_seqno_info},
297 {"i915_gem_interrupt", i915_interrupt_info},
298 {"i915_gem_hws", i915_hws_info},
299};
300
301#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
302
303int i915_gem_proc_init(struct drm_minor *minor)
304{
305 struct proc_dir_entry *ent;
306 int i, j;
307
308 for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
309 ent = create_proc_entry(i915_gem_proc_list[i].name,
310 S_IFREG | S_IRUGO, minor->dev_root);
311 if (!ent) {
312 DRM_ERROR("Cannot create /proc/dri/.../%s\n",
313 i915_gem_proc_list[i].name);
314 for (j = 0; j < i; j++)
315 remove_proc_entry(i915_gem_proc_list[i].name,
316 minor->dev_root);
317 return -1;
318 }
319 ent->read_proc = i915_gem_proc_list[i].f;
320 ent->data = minor;
321 }
322 return 0;
323}
324
325void i915_gem_proc_cleanup(struct drm_minor *minor)
326{
327 int i;
328
329 if (!minor->dev_root)
330 return;
331
332 for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
333 remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
334}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7fb4191ef934..4cce1aef438e 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -96,16 +96,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
96 */ 96 */
97 swizzle_x = I915_BIT_6_SWIZZLE_NONE; 97 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
98 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 98 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
99 } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) || 99 } else if (IS_MOBILE(dev)) {
100 IS_GM45(dev)) {
101 uint32_t dcc; 100 uint32_t dcc;
102 101
103 /* On 915-945 and GM965, channel interleave by the CPU is 102 /* On mobile 9xx chipsets, channel interleave by the CPU is
104 * determined by DCC. The CPU will alternate based on bit 6 103 * determined by DCC. For single-channel, neither the CPU
105 * in interleaved mode, and the GPU will then also alternate 104 * nor the GPU do swizzling. For dual channel interleaved,
106 * on bit 6, 9, and 10 for X, but the CPU may also optionally 105 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
107 * alternate based on bit 17 (XOR not disabled and XOR 106 * 9 for Y tiled. The CPU's interleave is independent, and
108 * bit == 17). 107 * can be based on either bit 11 (haven't seen this yet) or
108 * bit 17 (common).
109 */ 109 */
110 dcc = I915_READ(DCC); 110 dcc = I915_READ(DCC);
111 switch (dcc & DCC_ADDRESSING_MODE_MASK) { 111 switch (dcc & DCC_ADDRESSING_MODE_MASK) {
@@ -115,19 +115,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
115 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 115 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
116 break; 116 break;
117 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: 117 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
118 if (IS_I915G(dev) || IS_I915GM(dev) || 118 if (dcc & DCC_CHANNEL_XOR_DISABLE) {
119 dcc & DCC_CHANNEL_XOR_DISABLE) { 119 /* This is the base swizzling by the GPU for
120 * tiled buffers.
121 */
120 swizzle_x = I915_BIT_6_SWIZZLE_9_10; 122 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
121 swizzle_y = I915_BIT_6_SWIZZLE_9; 123 swizzle_y = I915_BIT_6_SWIZZLE_9;
122 } else if ((IS_I965GM(dev) || IS_GM45(dev)) && 124 } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
123 (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { 125 /* Bit 11 swizzling by the CPU in addition. */
124 /* GM965/GM45 does either bit 11 or bit 17
125 * swizzling.
126 */
127 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; 126 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
128 swizzle_y = I915_BIT_6_SWIZZLE_9_11; 127 swizzle_y = I915_BIT_6_SWIZZLE_9_11;
129 } else { 128 } else {
130 /* Bit 17 or perhaps other swizzling */ 129 /* Bit 17 swizzling by the CPU in addition. */
131 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 130 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
132 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 131 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
133 } 132 }
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 90600d899413..377cc588f5e9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -359,6 +359,7 @@
359#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ 359#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
360#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ 360#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
361#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ 361#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
362#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */
362 363
363#define I915_FIFO_UNDERRUN_STATUS (1UL<<31) 364#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
364#define I915_CRC_ERROR_ENABLE (1UL<<29) 365#define I915_CRC_ERROR_ENABLE (1UL<<29)
@@ -435,6 +436,7 @@
435 */ 436 */
436#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 437#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
437#define DPLL_FPA01_P1_POST_DIV_SHIFT 16 438#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
439#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
438/* i830, required in DVO non-gang */ 440/* i830, required in DVO non-gang */
439#define PLL_P2_DIVIDE_BY_4 (1 << 23) 441#define PLL_P2_DIVIDE_BY_4 (1 << 23)
440#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ 442#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
@@ -501,10 +503,12 @@
501#define FPB0 0x06048 503#define FPB0 0x06048
502#define FPB1 0x0604c 504#define FPB1 0x0604c
503#define FP_N_DIV_MASK 0x003f0000 505#define FP_N_DIV_MASK 0x003f0000
506#define FP_N_IGD_DIV_MASK 0x00ff0000
504#define FP_N_DIV_SHIFT 16 507#define FP_N_DIV_SHIFT 16
505#define FP_M1_DIV_MASK 0x00003f00 508#define FP_M1_DIV_MASK 0x00003f00
506#define FP_M1_DIV_SHIFT 8 509#define FP_M1_DIV_SHIFT 8
507#define FP_M2_DIV_MASK 0x0000003f 510#define FP_M2_DIV_MASK 0x0000003f
511#define FP_M2_IGD_DIV_MASK 0x000000ff
508#define FP_M2_DIV_SHIFT 0 512#define FP_M2_DIV_SHIFT 0
509#define DPLL_TEST 0x606c 513#define DPLL_TEST 0x606c
510#define DPLLB_TEST_SDVO_DIV_1 (0 << 22) 514#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
@@ -629,6 +633,22 @@
629#define TV_HOTPLUG_INT_EN (1 << 18) 633#define TV_HOTPLUG_INT_EN (1 << 18)
630#define CRT_HOTPLUG_INT_EN (1 << 9) 634#define CRT_HOTPLUG_INT_EN (1 << 9)
631#define CRT_HOTPLUG_FORCE_DETECT (1 << 3) 635#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
636#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
637/* must use period 64 on GM45 according to docs */
638#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
639#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
640#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
641#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
642#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
643#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
644#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
645#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
646#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
647#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
648#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
649#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
650#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
651
632 652
633#define PORT_HOTPLUG_STAT 0x61114 653#define PORT_HOTPLUG_STAT 0x61114
634#define HDMIB_HOTPLUG_INT_STATUS (1 << 29) 654#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -856,7 +876,7 @@
856 */ 876 */
857# define TV_ENC_C0_FIX (1 << 10) 877# define TV_ENC_C0_FIX (1 << 10)
858/** Bits that must be preserved by software */ 878/** Bits that must be preserved by software */
859# define TV_CTL_SAVE ((3 << 8) | (3 << 6)) 879# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
860# define TV_FUSE_STATE_MASK (3 << 4) 880# define TV_FUSE_STATE_MASK (3 << 4)
861/** Read-only state that reports all features enabled */ 881/** Read-only state that reports all features enabled */
862# define TV_FUSE_STATE_ENABLED (0 << 4) 882# define TV_FUSE_STATE_ENABLED (0 << 4)
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 5ea715ace3a0..de621aad85b5 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -162,13 +162,13 @@ struct bdb_lvds_options {
162 u8 panel_type; 162 u8 panel_type;
163 u8 rsvd1; 163 u8 rsvd1;
164 /* LVDS capabilities, stored in a dword */ 164 /* LVDS capabilities, stored in a dword */
165 u8 rsvd2:1;
166 u8 lvds_edid:1;
167 u8 pixel_dither:1;
168 u8 pfit_ratio_auto:1;
169 u8 pfit_gfx_mode_enhanced:1;
170 u8 pfit_text_mode_enhanced:1;
171 u8 pfit_mode:2; 165 u8 pfit_mode:2;
166 u8 pfit_text_mode_enhanced:1;
167 u8 pfit_gfx_mode_enhanced:1;
168 u8 pfit_ratio_auto:1;
169 u8 pixel_dither:1;
170 u8 lvds_edid:1;
171 u8 rsvd2:1;
172 u8 rsvd4; 172 u8 rsvd4;
173} __attribute__((packed)); 173} __attribute__((packed));
174 174
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index dcaed3466e83..2b6d44381c31 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -64,11 +64,21 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
64static int intel_crt_mode_valid(struct drm_connector *connector, 64static int intel_crt_mode_valid(struct drm_connector *connector,
65 struct drm_display_mode *mode) 65 struct drm_display_mode *mode)
66{ 66{
67 struct drm_device *dev = connector->dev;
68
69 int max_clock = 0;
67 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 70 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
68 return MODE_NO_DBLESCAN; 71 return MODE_NO_DBLESCAN;
69 72
70 if (mode->clock > 400000 || mode->clock < 25000) 73 if (mode->clock < 25000)
71 return MODE_CLOCK_RANGE; 74 return MODE_CLOCK_LOW;
75
76 if (!IS_I9XX(dev))
77 max_clock = 350000;
78 else
79 max_clock = 400000;
80 if (mode->clock > max_clock)
81 return MODE_CLOCK_HIGH;
72 82
73 return MODE_OK; 83 return MODE_OK;
74} 84}
@@ -113,10 +123,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
113 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 123 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
114 adpa |= ADPA_VSYNC_ACTIVE_HIGH; 124 adpa |= ADPA_VSYNC_ACTIVE_HIGH;
115 125
116 if (intel_crtc->pipe == 0) 126 if (intel_crtc->pipe == 0) {
117 adpa |= ADPA_PIPE_A_SELECT; 127 adpa |= ADPA_PIPE_A_SELECT;
118 else 128 I915_WRITE(BCLRPAT_A, 0);
129 } else {
119 adpa |= ADPA_PIPE_B_SELECT; 130 adpa |= ADPA_PIPE_B_SELECT;
131 I915_WRITE(BCLRPAT_B, 0);
132 }
120 133
121 I915_WRITE(ADPA, adpa); 134 I915_WRITE(ADPA, adpa);
122} 135}
@@ -133,20 +146,39 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
133{ 146{
134 struct drm_device *dev = connector->dev; 147 struct drm_device *dev = connector->dev;
135 struct drm_i915_private *dev_priv = dev->dev_private; 148 struct drm_i915_private *dev_priv = dev->dev_private;
136 u32 temp; 149 u32 hotplug_en;
137 150 int i, tries = 0;
138 unsigned long timeout = jiffies + msecs_to_jiffies(1000); 151 /*
139 152 * On 4 series desktop, CRT detect sequence need to be done twice
140 temp = I915_READ(PORT_HOTPLUG_EN); 153 * to get a reliable result.
141 154 */
142 I915_WRITE(PORT_HOTPLUG_EN,
143 temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
144 155
145 do { 156 if (IS_G4X(dev) && !IS_GM45(dev))
146 if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT)) 157 tries = 2;
147 break; 158 else
148 msleep(1); 159 tries = 1;
149 } while (time_after(timeout, jiffies)); 160 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
161 hotplug_en &= ~(CRT_HOTPLUG_MASK);
162 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
163
164 if (IS_GM45(dev))
165 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
166
167 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
168
169 for (i = 0; i < tries ; i++) {
170 unsigned long timeout;
171 /* turn on the FORCE_DETECT */
172 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
173 timeout = jiffies + msecs_to_jiffies(1000);
174 /* wait for FORCE_DETECT to go off */
175 do {
176 if (!(I915_READ(PORT_HOTPLUG_EN) &
177 CRT_HOTPLUG_FORCE_DETECT))
178 break;
179 msleep(1);
180 } while (time_after(timeout, jiffies));
181 }
150 182
151 if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) == 183 if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
152 CRT_HOTPLUG_MONITOR_COLOR) 184 CRT_HOTPLUG_MONITOR_COLOR)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a2834276cb38..d9c50ff94d76 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -56,11 +56,13 @@ typedef struct {
56} intel_p2_t; 56} intel_p2_t;
57 57
58#define INTEL_P2_NUM 2 58#define INTEL_P2_NUM 2
59 59typedef struct intel_limit intel_limit_t;
60typedef struct { 60struct intel_limit {
61 intel_range_t dot, vco, n, m, m1, m2, p, p1; 61 intel_range_t dot, vco, n, m, m1, m2, p, p1;
62 intel_p2_t p2; 62 intel_p2_t p2;
63} intel_limit_t; 63 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
64 int, int, intel_clock_t *);
65};
64 66
65#define I8XX_DOT_MIN 25000 67#define I8XX_DOT_MIN 25000
66#define I8XX_DOT_MAX 350000 68#define I8XX_DOT_MAX 350000
@@ -90,18 +92,32 @@ typedef struct {
90#define I9XX_DOT_MAX 400000 92#define I9XX_DOT_MAX 400000
91#define I9XX_VCO_MIN 1400000 93#define I9XX_VCO_MIN 1400000
92#define I9XX_VCO_MAX 2800000 94#define I9XX_VCO_MAX 2800000
95#define IGD_VCO_MIN 1700000
96#define IGD_VCO_MAX 3500000
93#define I9XX_N_MIN 1 97#define I9XX_N_MIN 1
94#define I9XX_N_MAX 6 98#define I9XX_N_MAX 6
99/* IGD's Ncounter is a ring counter */
100#define IGD_N_MIN 3
101#define IGD_N_MAX 6
95#define I9XX_M_MIN 70 102#define I9XX_M_MIN 70
96#define I9XX_M_MAX 120 103#define I9XX_M_MAX 120
104#define IGD_M_MIN 2
105#define IGD_M_MAX 256
97#define I9XX_M1_MIN 10 106#define I9XX_M1_MIN 10
98#define I9XX_M1_MAX 22 107#define I9XX_M1_MAX 22
99#define I9XX_M2_MIN 5 108#define I9XX_M2_MIN 5
100#define I9XX_M2_MAX 9 109#define I9XX_M2_MAX 9
110/* IGD M1 is reserved, and must be 0 */
111#define IGD_M1_MIN 0
112#define IGD_M1_MAX 0
113#define IGD_M2_MIN 0
114#define IGD_M2_MAX 254
101#define I9XX_P_SDVO_DAC_MIN 5 115#define I9XX_P_SDVO_DAC_MIN 5
102#define I9XX_P_SDVO_DAC_MAX 80 116#define I9XX_P_SDVO_DAC_MAX 80
103#define I9XX_P_LVDS_MIN 7 117#define I9XX_P_LVDS_MIN 7
104#define I9XX_P_LVDS_MAX 98 118#define I9XX_P_LVDS_MAX 98
119#define IGD_P_LVDS_MIN 7
120#define IGD_P_LVDS_MAX 112
105#define I9XX_P1_MIN 1 121#define I9XX_P1_MIN 1
106#define I9XX_P1_MAX 8 122#define I9XX_P1_MAX 8
107#define I9XX_P2_SDVO_DAC_SLOW 10 123#define I9XX_P2_SDVO_DAC_SLOW 10
@@ -115,6 +131,97 @@ typedef struct {
115#define INTEL_LIMIT_I8XX_LVDS 1 131#define INTEL_LIMIT_I8XX_LVDS 1
116#define INTEL_LIMIT_I9XX_SDVO_DAC 2 132#define INTEL_LIMIT_I9XX_SDVO_DAC 2
117#define INTEL_LIMIT_I9XX_LVDS 3 133#define INTEL_LIMIT_I9XX_LVDS 3
134#define INTEL_LIMIT_G4X_SDVO 4
135#define INTEL_LIMIT_G4X_HDMI_DAC 5
136#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6
137#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
138#define INTEL_LIMIT_IGD_SDVO_DAC 8
139#define INTEL_LIMIT_IGD_LVDS 9
140
141/*The parameter is for SDVO on G4x platform*/
142#define G4X_DOT_SDVO_MIN 25000
143#define G4X_DOT_SDVO_MAX 270000
144#define G4X_VCO_MIN 1750000
145#define G4X_VCO_MAX 3500000
146#define G4X_N_SDVO_MIN 1
147#define G4X_N_SDVO_MAX 4
148#define G4X_M_SDVO_MIN 104
149#define G4X_M_SDVO_MAX 138
150#define G4X_M1_SDVO_MIN 17
151#define G4X_M1_SDVO_MAX 23
152#define G4X_M2_SDVO_MIN 5
153#define G4X_M2_SDVO_MAX 11
154#define G4X_P_SDVO_MIN 10
155#define G4X_P_SDVO_MAX 30
156#define G4X_P1_SDVO_MIN 1
157#define G4X_P1_SDVO_MAX 3
158#define G4X_P2_SDVO_SLOW 10
159#define G4X_P2_SDVO_FAST 10
160#define G4X_P2_SDVO_LIMIT 270000
161
162/*The parameter is for HDMI_DAC on G4x platform*/
163#define G4X_DOT_HDMI_DAC_MIN 22000
164#define G4X_DOT_HDMI_DAC_MAX 400000
165#define G4X_N_HDMI_DAC_MIN 1
166#define G4X_N_HDMI_DAC_MAX 4
167#define G4X_M_HDMI_DAC_MIN 104
168#define G4X_M_HDMI_DAC_MAX 138
169#define G4X_M1_HDMI_DAC_MIN 16
170#define G4X_M1_HDMI_DAC_MAX 23
171#define G4X_M2_HDMI_DAC_MIN 5
172#define G4X_M2_HDMI_DAC_MAX 11
173#define G4X_P_HDMI_DAC_MIN 5
174#define G4X_P_HDMI_DAC_MAX 80
175#define G4X_P1_HDMI_DAC_MIN 1
176#define G4X_P1_HDMI_DAC_MAX 8
177#define G4X_P2_HDMI_DAC_SLOW 10
178#define G4X_P2_HDMI_DAC_FAST 5
179#define G4X_P2_HDMI_DAC_LIMIT 165000
180
181/*The parameter is for SINGLE_CHANNEL_LVDS on G4x platform*/
182#define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000
183#define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000
184#define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1
185#define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3
186#define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104
187#define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138
188#define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17
189#define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23
190#define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5
191#define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11
192#define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28
193#define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112
194#define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2
195#define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8
196#define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14
197#define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14
198#define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0
199
200/*The parameter is for DUAL_CHANNEL_LVDS on G4x platform*/
201#define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000
202#define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000
203#define G4X_N_DUAL_CHANNEL_LVDS_MIN 1
204#define G4X_N_DUAL_CHANNEL_LVDS_MAX 3
205#define G4X_M_DUAL_CHANNEL_LVDS_MIN 104
206#define G4X_M_DUAL_CHANNEL_LVDS_MAX 138
207#define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17
208#define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23
209#define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5
210#define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11
211#define G4X_P_DUAL_CHANNEL_LVDS_MIN 14
212#define G4X_P_DUAL_CHANNEL_LVDS_MAX 42
213#define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2
214#define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6
215#define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7
216#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
217#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
218
219static bool
220intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
221 int target, int refclk, intel_clock_t *best_clock);
222static bool
223intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
224 int target, int refclk, intel_clock_t *best_clock);
118 225
119static const intel_limit_t intel_limits[] = { 226static const intel_limit_t intel_limits[] = {
120 { /* INTEL_LIMIT_I8XX_DVO_DAC */ 227 { /* INTEL_LIMIT_I8XX_DVO_DAC */
@@ -128,6 +235,7 @@ static const intel_limit_t intel_limits[] = {
128 .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, 235 .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX },
129 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 236 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
130 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, 237 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
238 .find_pll = intel_find_best_PLL,
131 }, 239 },
132 { /* INTEL_LIMIT_I8XX_LVDS */ 240 { /* INTEL_LIMIT_I8XX_LVDS */
133 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 241 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
@@ -140,6 +248,7 @@ static const intel_limit_t intel_limits[] = {
140 .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, 248 .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX },
141 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 249 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
142 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, 250 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
251 .find_pll = intel_find_best_PLL,
143 }, 252 },
144 { /* INTEL_LIMIT_I9XX_SDVO_DAC */ 253 { /* INTEL_LIMIT_I9XX_SDVO_DAC */
145 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 254 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -152,6 +261,7 @@ static const intel_limit_t intel_limits[] = {
152 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 261 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
153 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 262 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
154 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 263 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
264 .find_pll = intel_find_best_PLL,
155 }, 265 },
156 { /* INTEL_LIMIT_I9XX_LVDS */ 266 { /* INTEL_LIMIT_I9XX_LVDS */
157 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 267 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -167,19 +277,157 @@ static const intel_limit_t intel_limits[] = {
167 */ 277 */
168 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 278 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
169 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, 279 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
280 .find_pll = intel_find_best_PLL,
281 },
282 /* below parameter and function is for G4X Chipset Family*/
283 { /* INTEL_LIMIT_G4X_SDVO */
284 .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX },
285 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
286 .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX },
287 .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX },
288 .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX },
289 .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX },
290 .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX },
291 .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX},
292 .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT,
293 .p2_slow = G4X_P2_SDVO_SLOW,
294 .p2_fast = G4X_P2_SDVO_FAST
295 },
296 .find_pll = intel_g4x_find_best_PLL,
297 },
298 { /* INTEL_LIMIT_G4X_HDMI_DAC */
299 .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX },
300 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
301 .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX },
302 .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX },
303 .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX },
304 .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX },
305 .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX },
306 .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX},
307 .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT,
308 .p2_slow = G4X_P2_HDMI_DAC_SLOW,
309 .p2_fast = G4X_P2_HDMI_DAC_FAST
310 },
311 .find_pll = intel_g4x_find_best_PLL,
312 },
313 { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */
314 .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
315 .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
316 .vco = { .min = G4X_VCO_MIN,
317 .max = G4X_VCO_MAX },
318 .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN,
319 .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX },
320 .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN,
321 .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX },
322 .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN,
323 .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX },
324 .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN,
325 .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX },
326 .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN,
327 .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX },
328 .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN,
329 .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX },
330 .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT,
331 .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW,
332 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
333 },
334 .find_pll = intel_g4x_find_best_PLL,
335 },
336 { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */
337 .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
338 .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
339 .vco = { .min = G4X_VCO_MIN,
340 .max = G4X_VCO_MAX },
341 .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN,
342 .max = G4X_N_DUAL_CHANNEL_LVDS_MAX },
343 .m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN,
344 .max = G4X_M_DUAL_CHANNEL_LVDS_MAX },
345 .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN,
346 .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX },
347 .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN,
348 .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX },
349 .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN,
350 .max = G4X_P_DUAL_CHANNEL_LVDS_MAX },
351 .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN,
352 .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX },
353 .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT,
354 .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW,
355 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
356 },
357 .find_pll = intel_g4x_find_best_PLL,
358 },
359 { /* INTEL_LIMIT_IGD_SDVO */
360 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
361 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
362 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
363 .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
364 .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
365 .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
366 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
367 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
368 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
369 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
170 }, 370 },
371 { /* INTEL_LIMIT_IGD_LVDS */
372 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
373 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
374 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
375 .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
376 .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
377 .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
378 .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX },
379 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
380 /* IGD only supports single-channel mode. */
381 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
382 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
383 },
384
171}; 385};
172 386
387static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
388{
389 struct drm_device *dev = crtc->dev;
390 struct drm_i915_private *dev_priv = dev->dev_private;
391 const intel_limit_t *limit;
392
393 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
394 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
395 LVDS_CLKB_POWER_UP)
396 /* LVDS with dual channel */
397 limit = &intel_limits
398 [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS];
399 else
400 /* LVDS with dual channel */
401 limit = &intel_limits
402 [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS];
403 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
404 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
405 limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC];
406 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
407 limit = &intel_limits[INTEL_LIMIT_G4X_SDVO];
408 } else /* The option is for other outputs */
409 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
410
411 return limit;
412}
413
173static const intel_limit_t *intel_limit(struct drm_crtc *crtc) 414static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
174{ 415{
175 struct drm_device *dev = crtc->dev; 416 struct drm_device *dev = crtc->dev;
176 const intel_limit_t *limit; 417 const intel_limit_t *limit;
177 418
178 if (IS_I9XX(dev)) { 419 if (IS_G4X(dev)) {
420 limit = intel_g4x_limit(crtc);
421 } else if (IS_I9XX(dev) && !IS_IGD(dev)) {
179 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 422 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
180 limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; 423 limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
181 else 424 else
182 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; 425 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
426 } else if (IS_IGD(dev)) {
427 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
428 limit = &intel_limits[INTEL_LIMIT_IGD_LVDS];
429 else
430 limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC];
183 } else { 431 } else {
184 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 432 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
185 limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; 433 limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
@@ -189,8 +437,21 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
189 return limit; 437 return limit;
190} 438}
191 439
192static void intel_clock(int refclk, intel_clock_t *clock) 440/* m1 is reserved as 0 in IGD, n is a ring counter */
441static void igd_clock(int refclk, intel_clock_t *clock)
193{ 442{
443 clock->m = clock->m2 + 2;
444 clock->p = clock->p1 * clock->p2;
445 clock->vco = refclk * clock->m / clock->n;
446 clock->dot = clock->vco / clock->p;
447}
448
449static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
450{
451 if (IS_IGD(dev)) {
452 igd_clock(refclk, clock);
453 return;
454 }
194 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 455 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
195 clock->p = clock->p1 * clock->p2; 456 clock->p = clock->p1 * clock->p2;
196 clock->vco = refclk * clock->m / (clock->n + 2); 457 clock->vco = refclk * clock->m / (clock->n + 2);
@@ -226,6 +487,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
226static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) 487static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
227{ 488{
228 const intel_limit_t *limit = intel_limit (crtc); 489 const intel_limit_t *limit = intel_limit (crtc);
490 struct drm_device *dev = crtc->dev;
229 491
230 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 492 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
231 INTELPllInvalid ("p1 out of range\n"); 493 INTELPllInvalid ("p1 out of range\n");
@@ -235,7 +497,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
235 INTELPllInvalid ("m2 out of range\n"); 497 INTELPllInvalid ("m2 out of range\n");
236 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 498 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
237 INTELPllInvalid ("m1 out of range\n"); 499 INTELPllInvalid ("m1 out of range\n");
238 if (clock->m1 <= clock->m2) 500 if (clock->m1 <= clock->m2 && !IS_IGD(dev))
239 INTELPllInvalid ("m1 <= m2\n"); 501 INTELPllInvalid ("m1 <= m2\n");
240 if (clock->m < limit->m.min || limit->m.max < clock->m) 502 if (clock->m < limit->m.min || limit->m.max < clock->m)
241 INTELPllInvalid ("m out of range\n"); 503 INTELPllInvalid ("m out of range\n");
@@ -252,18 +514,14 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
252 return true; 514 return true;
253} 515}
254 516
255/** 517static bool
256 * Returns a set of divisors for the desired target clock with the given 518intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
257 * refclk, or FALSE. The returned values represent the clock equation: 519 int target, int refclk, intel_clock_t *best_clock)
258 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 520
259 */
260static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
261 int refclk, intel_clock_t *best_clock)
262{ 521{
263 struct drm_device *dev = crtc->dev; 522 struct drm_device *dev = crtc->dev;
264 struct drm_i915_private *dev_priv = dev->dev_private; 523 struct drm_i915_private *dev_priv = dev->dev_private;
265 intel_clock_t clock; 524 intel_clock_t clock;
266 const intel_limit_t *limit = intel_limit(crtc);
267 int err = target; 525 int err = target;
268 526
269 if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 527 if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
@@ -289,15 +547,17 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
289 memset (best_clock, 0, sizeof (*best_clock)); 547 memset (best_clock, 0, sizeof (*best_clock));
290 548
291 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 549 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
292 for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 && 550 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
293 clock.m2 <= limit->m2.max; clock.m2++) { 551 /* m1 is always 0 in IGD */
552 if (clock.m2 >= clock.m1 && !IS_IGD(dev))
553 break;
294 for (clock.n = limit->n.min; clock.n <= limit->n.max; 554 for (clock.n = limit->n.min; clock.n <= limit->n.max;
295 clock.n++) { 555 clock.n++) {
296 for (clock.p1 = limit->p1.min; 556 for (clock.p1 = limit->p1.min;
297 clock.p1 <= limit->p1.max; clock.p1++) { 557 clock.p1 <= limit->p1.max; clock.p1++) {
298 int this_err; 558 int this_err;
299 559
300 intel_clock(refclk, &clock); 560 intel_clock(dev, refclk, &clock);
301 561
302 if (!intel_PLL_is_valid(crtc, &clock)) 562 if (!intel_PLL_is_valid(crtc, &clock))
303 continue; 563 continue;
@@ -315,6 +575,63 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
315 return (err != target); 575 return (err != target);
316} 576}
317 577
578static bool
579intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
580 int target, int refclk, intel_clock_t *best_clock)
581{
582 struct drm_device *dev = crtc->dev;
583 struct drm_i915_private *dev_priv = dev->dev_private;
584 intel_clock_t clock;
585 int max_n;
586 bool found;
587 /* approximately equals target * 0.00488 */
588 int err_most = (target >> 8) + (target >> 10);
589 found = false;
590
591 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
592 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
593 LVDS_CLKB_POWER_UP)
594 clock.p2 = limit->p2.p2_fast;
595 else
596 clock.p2 = limit->p2.p2_slow;
597 } else {
598 if (target < limit->p2.dot_limit)
599 clock.p2 = limit->p2.p2_slow;
600 else
601 clock.p2 = limit->p2.p2_fast;
602 }
603
604 memset(best_clock, 0, sizeof(*best_clock));
605 max_n = limit->n.max;
606 /* based on hardware requriment prefer smaller n to precision */
607 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
608 /* based on hardware requirment prefere larger m1,m2, p1 */
609 for (clock.m1 = limit->m1.max;
610 clock.m1 >= limit->m1.min; clock.m1--) {
611 for (clock.m2 = limit->m2.max;
612 clock.m2 >= limit->m2.min; clock.m2--) {
613 for (clock.p1 = limit->p1.max;
614 clock.p1 >= limit->p1.min; clock.p1--) {
615 int this_err;
616
617 intel_clock(dev, refclk, &clock);
618 if (!intel_PLL_is_valid(crtc, &clock))
619 continue;
620 this_err = abs(clock.dot - target) ;
621 if (this_err < err_most) {
622 *best_clock = clock;
623 err_most = this_err;
624 max_n = clock.n;
625 found = true;
626 }
627 }
628 }
629 }
630 }
631
632 return found;
633}
634
318void 635void
319intel_wait_for_vblank(struct drm_device *dev) 636intel_wait_for_vblank(struct drm_device *dev)
320{ 637{
@@ -634,7 +951,7 @@ static int intel_get_core_clock_speed(struct drm_device *dev)
634 return 400000; 951 return 400000;
635 else if (IS_I915G(dev)) 952 else if (IS_I915G(dev))
636 return 333000; 953 return 333000;
637 else if (IS_I945GM(dev) || IS_845G(dev)) 954 else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
638 return 200000; 955 return 200000;
639 else if (IS_I915GM(dev)) { 956 else if (IS_I915GM(dev)) {
640 u16 gcfgc = 0; 957 u16 gcfgc = 0;
@@ -733,6 +1050,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
733 bool is_crt = false, is_lvds = false, is_tv = false; 1050 bool is_crt = false, is_lvds = false, is_tv = false;
734 struct drm_mode_config *mode_config = &dev->mode_config; 1051 struct drm_mode_config *mode_config = &dev->mode_config;
735 struct drm_connector *connector; 1052 struct drm_connector *connector;
1053 const intel_limit_t *limit;
736 int ret; 1054 int ret;
737 1055
738 drm_vblank_pre_modeset(dev, pipe); 1056 drm_vblank_pre_modeset(dev, pipe);
@@ -776,13 +1094,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
776 refclk = 48000; 1094 refclk = 48000;
777 } 1095 }
778 1096
779 ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); 1097 /*
1098 * Returns a set of divisors for the desired target clock with the given
1099 * refclk, or FALSE. The returned values represent the clock equation:
1100 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
1101 */
1102 limit = intel_limit(crtc);
1103 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
780 if (!ok) { 1104 if (!ok) {
781 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 1105 DRM_ERROR("Couldn't find PLL settings for mode!\n");
782 return -EINVAL; 1106 return -EINVAL;
783 } 1107 }
784 1108
785 fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 1109 if (IS_IGD(dev))
1110 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
1111 else
1112 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
786 1113
787 dpll = DPLL_VGA_MODE_DIS; 1114 dpll = DPLL_VGA_MODE_DIS;
788 if (IS_I9XX(dev)) { 1115 if (IS_I9XX(dev)) {
@@ -799,7 +1126,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
799 } 1126 }
800 1127
801 /* compute bitmask from p1 value */ 1128 /* compute bitmask from p1 value */
802 dpll |= (1 << (clock.p1 - 1)) << 16; 1129 if (IS_IGD(dev))
1130 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
1131 else
1132 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
803 switch (clock.p2) { 1133 switch (clock.p2) {
804 case 5: 1134 case 5:
805 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 1135 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
@@ -1279,10 +1609,20 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1279 fp = I915_READ((pipe == 0) ? FPA1 : FPB1); 1609 fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
1280 1610
1281 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 1611 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
1282 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 1612 if (IS_IGD(dev)) {
1283 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 1613 clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
1614 clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
1615 } else {
1616 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
1617 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
1618 }
1619
1284 if (IS_I9XX(dev)) { 1620 if (IS_I9XX(dev)) {
1285 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 1621 if (IS_IGD(dev))
1622 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
1623 DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
1624 else
1625 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
1286 DPLL_FPA01_P1_POST_DIV_SHIFT); 1626 DPLL_FPA01_P1_POST_DIV_SHIFT);
1287 1627
1288 switch (dpll & DPLL_MODE_MASK) { 1628 switch (dpll & DPLL_MODE_MASK) {
@@ -1301,7 +1641,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1301 } 1641 }
1302 1642
1303 /* XXX: Handle the 100Mhz refclk */ 1643 /* XXX: Handle the 100Mhz refclk */
1304 intel_clock(96000, &clock); 1644 intel_clock(dev, 96000, &clock);
1305 } else { 1645 } else {
1306 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); 1646 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
1307 1647
@@ -1313,9 +1653,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1313 if ((dpll & PLL_REF_INPUT_MASK) == 1653 if ((dpll & PLL_REF_INPUT_MASK) ==
1314 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 1654 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
1315 /* XXX: might not be 66MHz */ 1655 /* XXX: might not be 66MHz */
1316 intel_clock(66000, &clock); 1656 intel_clock(dev, 66000, &clock);
1317 } else 1657 } else
1318 intel_clock(48000, &clock); 1658 intel_clock(dev, 48000, &clock);
1319 } else { 1659 } else {
1320 if (dpll & PLL_P1_DIVIDE_BY_TWO) 1660 if (dpll & PLL_P1_DIVIDE_BY_TWO)
1321 clock.p1 = 2; 1661 clock.p1 = 2;
@@ -1328,7 +1668,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1328 else 1668 else
1329 clock.p2 = 2; 1669 clock.p2 = 2;
1330 1670
1331 intel_clock(48000, &clock); 1671 intel_clock(dev, 48000, &clock);
1332 } 1672 }
1333 } 1673 }
1334 1674
@@ -1474,13 +1814,21 @@ static void intel_setup_outputs(struct drm_device *dev)
1474 1814
1475 if (IS_I9XX(dev)) { 1815 if (IS_I9XX(dev)) {
1476 int found; 1816 int found;
1817 u32 reg;
1477 1818
1478 if (I915_READ(SDVOB) & SDVO_DETECTED) { 1819 if (I915_READ(SDVOB) & SDVO_DETECTED) {
1479 found = intel_sdvo_init(dev, SDVOB); 1820 found = intel_sdvo_init(dev, SDVOB);
1480 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 1821 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
1481 intel_hdmi_init(dev, SDVOB); 1822 intel_hdmi_init(dev, SDVOB);
1482 } 1823 }
1483 if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) { 1824
1825 /* Before G4X SDVOC doesn't have its own detect register */
1826 if (IS_G4X(dev))
1827 reg = SDVOC;
1828 else
1829 reg = SDVOB;
1830
1831 if (I915_READ(reg) & SDVO_DETECTED) {
1484 found = intel_sdvo_init(dev, SDVOC); 1832 found = intel_sdvo_init(dev, SDVOC);
1485 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 1833 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
1486 intel_hdmi_init(dev, SDVOC); 1834 intel_hdmi_init(dev, SDVOC);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0d211af98854..6619f26e46a5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -265,7 +265,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
265 pfit_control = 0; 265 pfit_control = 0;
266 266
267 if (!IS_I965G(dev)) { 267 if (!IS_I965G(dev)) {
268 if (dev_priv->panel_wants_dither) 268 if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
269 pfit_control |= PANEL_8TO6_DITHER_ENABLE; 269 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
270 } 270 }
271 else 271 else
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 56485d67369b..ceca9471a75a 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -217,8 +217,8 @@ static const u32 filter_table[] = {
217 */ 217 */
218static const struct color_conversion ntsc_m_csc_composite = { 218static const struct color_conversion ntsc_m_csc_composite = {
219 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, 219 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
220 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, 220 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
221 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, 221 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
222}; 222};
223 223
224static const struct video_levels ntsc_m_levels_composite = { 224static const struct video_levels ntsc_m_levels_composite = {
@@ -226,9 +226,9 @@ static const struct video_levels ntsc_m_levels_composite = {
226}; 226};
227 227
228static const struct color_conversion ntsc_m_csc_svideo = { 228static const struct color_conversion ntsc_m_csc_svideo = {
229 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, 229 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
230 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, 230 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
231 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, 231 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
232}; 232};
233 233
234static const struct video_levels ntsc_m_levels_svideo = { 234static const struct video_levels ntsc_m_levels_svideo = {
@@ -237,8 +237,8 @@ static const struct video_levels ntsc_m_levels_svideo = {
237 237
238static const struct color_conversion ntsc_j_csc_composite = { 238static const struct color_conversion ntsc_j_csc_composite = {
239 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119, 239 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
240 .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00, 240 .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200,
241 .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00, 241 .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
242}; 242};
243 243
244static const struct video_levels ntsc_j_levels_composite = { 244static const struct video_levels ntsc_j_levels_composite = {
@@ -247,8 +247,8 @@ static const struct video_levels ntsc_j_levels_composite = {
247 247
248static const struct color_conversion ntsc_j_csc_svideo = { 248static const struct color_conversion ntsc_j_csc_svideo = {
249 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c, 249 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
250 .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00, 250 .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200,
251 .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00, 251 .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
252}; 252};
253 253
254static const struct video_levels ntsc_j_levels_svideo = { 254static const struct video_levels ntsc_j_levels_svideo = {
@@ -257,8 +257,8 @@ static const struct video_levels ntsc_j_levels_svideo = {
257 257
258static const struct color_conversion pal_csc_composite = { 258static const struct color_conversion pal_csc_composite = {
259 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113, 259 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
260 .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00, 260 .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200,
261 .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00, 261 .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
262}; 262};
263 263
264static const struct video_levels pal_levels_composite = { 264static const struct video_levels pal_levels_composite = {
@@ -267,8 +267,8 @@ static const struct video_levels pal_levels_composite = {
267 267
268static const struct color_conversion pal_csc_svideo = { 268static const struct color_conversion pal_csc_svideo = {
269 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, 269 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
270 .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00, 270 .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200,
271 .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00, 271 .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
272}; 272};
273 273
274static const struct video_levels pal_levels_svideo = { 274static const struct video_levels pal_levels_svideo = {
@@ -277,8 +277,8 @@ static const struct video_levels pal_levels_svideo = {
277 277
278static const struct color_conversion pal_m_csc_composite = { 278static const struct color_conversion pal_m_csc_composite = {
279 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, 279 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
280 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, 280 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
281 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, 281 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
282}; 282};
283 283
284static const struct video_levels pal_m_levels_composite = { 284static const struct video_levels pal_m_levels_composite = {
@@ -286,9 +286,9 @@ static const struct video_levels pal_m_levels_composite = {
286}; 286};
287 287
288static const struct color_conversion pal_m_csc_svideo = { 288static const struct color_conversion pal_m_csc_svideo = {
289 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, 289 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
290 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, 290 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
291 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, 291 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
292}; 292};
293 293
294static const struct video_levels pal_m_levels_svideo = { 294static const struct video_levels pal_m_levels_svideo = {
@@ -297,8 +297,8 @@ static const struct video_levels pal_m_levels_svideo = {
297 297
298static const struct color_conversion pal_n_csc_composite = { 298static const struct color_conversion pal_n_csc_composite = {
299 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, 299 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
300 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, 300 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
301 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, 301 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
302}; 302};
303 303
304static const struct video_levels pal_n_levels_composite = { 304static const struct video_levels pal_n_levels_composite = {
@@ -306,9 +306,9 @@ static const struct video_levels pal_n_levels_composite = {
306}; 306};
307 307
308static const struct color_conversion pal_n_csc_svideo = { 308static const struct color_conversion pal_n_csc_svideo = {
309 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, 309 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
310 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, 310 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
311 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, 311 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
312}; 312};
313 313
314static const struct video_levels pal_n_levels_svideo = { 314static const struct video_levels pal_n_levels_svideo = {
@@ -319,9 +319,9 @@ static const struct video_levels pal_n_levels_svideo = {
319 * Component connections 319 * Component connections
320 */ 320 */
321static const struct color_conversion sdtv_csc_yprpb = { 321static const struct color_conversion sdtv_csc_yprpb = {
322 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146, 322 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
323 .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00, 323 .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200,
324 .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00, 324 .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
325}; 325};
326 326
327static const struct color_conversion sdtv_csc_rgb = { 327static const struct color_conversion sdtv_csc_rgb = {
@@ -331,9 +331,9 @@ static const struct color_conversion sdtv_csc_rgb = {
331}; 331};
332 332
333static const struct color_conversion hdtv_csc_yprpb = { 333static const struct color_conversion hdtv_csc_yprpb = {
334 .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146, 334 .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
335 .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00, 335 .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
336 .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00, 336 .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
337}; 337};
338 338
339static const struct color_conversion hdtv_csc_rgb = { 339static const struct color_conversion hdtv_csc_rgb = {
@@ -414,7 +414,7 @@ struct tv_mode {
414static const struct tv_mode tv_modes[] = { 414static const struct tv_mode tv_modes[] = {
415 { 415 {
416 .name = "NTSC-M", 416 .name = "NTSC-M",
417 .clock = 107520, 417 .clock = 108000,
418 .refresh = 29970, 418 .refresh = 29970,
419 .oversample = TV_OVERSAMPLE_8X, 419 .oversample = TV_OVERSAMPLE_8X,
420 .component_only = 0, 420 .component_only = 0,
@@ -442,8 +442,8 @@ static const struct tv_mode tv_modes[] = {
442 .vburst_start_f4 = 10, .vburst_end_f4 = 240, 442 .vburst_start_f4 = 10, .vburst_end_f4 = 240,
443 443
444 /* desired 3.5800000 actual 3.5800000 clock 107.52 */ 444 /* desired 3.5800000 actual 3.5800000 clock 107.52 */
445 .dda1_inc = 136, 445 .dda1_inc = 135,
446 .dda2_inc = 7624, .dda2_size = 20013, 446 .dda2_inc = 20800, .dda2_size = 27456,
447 .dda3_inc = 0, .dda3_size = 0, 447 .dda3_inc = 0, .dda3_size = 0,
448 .sc_reset = TV_SC_RESET_EVERY_4, 448 .sc_reset = TV_SC_RESET_EVERY_4,
449 .pal_burst = false, 449 .pal_burst = false,
@@ -457,7 +457,7 @@ static const struct tv_mode tv_modes[] = {
457 }, 457 },
458 { 458 {
459 .name = "NTSC-443", 459 .name = "NTSC-443",
460 .clock = 107520, 460 .clock = 108000,
461 .refresh = 29970, 461 .refresh = 29970,
462 .oversample = TV_OVERSAMPLE_8X, 462 .oversample = TV_OVERSAMPLE_8X,
463 .component_only = 0, 463 .component_only = 0,
@@ -485,10 +485,10 @@ static const struct tv_mode tv_modes[] = {
485 485
486 /* desired 4.4336180 actual 4.4336180 clock 107.52 */ 486 /* desired 4.4336180 actual 4.4336180 clock 107.52 */
487 .dda1_inc = 168, 487 .dda1_inc = 168,
488 .dda2_inc = 18557, .dda2_size = 20625, 488 .dda2_inc = 4093, .dda2_size = 27456,
489 .dda3_inc = 0, .dda3_size = 0, 489 .dda3_inc = 310, .dda3_size = 525,
490 .sc_reset = TV_SC_RESET_EVERY_8, 490 .sc_reset = TV_SC_RESET_NEVER,
491 .pal_burst = true, 491 .pal_burst = false,
492 492
493 .composite_levels = &ntsc_m_levels_composite, 493 .composite_levels = &ntsc_m_levels_composite,
494 .composite_color = &ntsc_m_csc_composite, 494 .composite_color = &ntsc_m_csc_composite,
@@ -499,7 +499,7 @@ static const struct tv_mode tv_modes[] = {
499 }, 499 },
500 { 500 {
501 .name = "NTSC-J", 501 .name = "NTSC-J",
502 .clock = 107520, 502 .clock = 108000,
503 .refresh = 29970, 503 .refresh = 29970,
504 .oversample = TV_OVERSAMPLE_8X, 504 .oversample = TV_OVERSAMPLE_8X,
505 .component_only = 0, 505 .component_only = 0,
@@ -527,8 +527,8 @@ static const struct tv_mode tv_modes[] = {
527 .vburst_start_f4 = 10, .vburst_end_f4 = 240, 527 .vburst_start_f4 = 10, .vburst_end_f4 = 240,
528 528
529 /* desired 3.5800000 actual 3.5800000 clock 107.52 */ 529 /* desired 3.5800000 actual 3.5800000 clock 107.52 */
530 .dda1_inc = 136, 530 .dda1_inc = 135,
531 .dda2_inc = 7624, .dda2_size = 20013, 531 .dda2_inc = 20800, .dda2_size = 27456,
532 .dda3_inc = 0, .dda3_size = 0, 532 .dda3_inc = 0, .dda3_size = 0,
533 .sc_reset = TV_SC_RESET_EVERY_4, 533 .sc_reset = TV_SC_RESET_EVERY_4,
534 .pal_burst = false, 534 .pal_burst = false,
@@ -542,7 +542,7 @@ static const struct tv_mode tv_modes[] = {
542 }, 542 },
543 { 543 {
544 .name = "PAL-M", 544 .name = "PAL-M",
545 .clock = 107520, 545 .clock = 108000,
546 .refresh = 29970, 546 .refresh = 29970,
547 .oversample = TV_OVERSAMPLE_8X, 547 .oversample = TV_OVERSAMPLE_8X,
548 .component_only = 0, 548 .component_only = 0,
@@ -570,11 +570,11 @@ static const struct tv_mode tv_modes[] = {
570 .vburst_start_f4 = 10, .vburst_end_f4 = 240, 570 .vburst_start_f4 = 10, .vburst_end_f4 = 240,
571 571
572 /* desired 3.5800000 actual 3.5800000 clock 107.52 */ 572 /* desired 3.5800000 actual 3.5800000 clock 107.52 */
573 .dda1_inc = 136, 573 .dda1_inc = 135,
574 .dda2_inc = 7624, .dda2_size = 20013, 574 .dda2_inc = 16704, .dda2_size = 27456,
575 .dda3_inc = 0, .dda3_size = 0, 575 .dda3_inc = 0, .dda3_size = 0,
576 .sc_reset = TV_SC_RESET_EVERY_4, 576 .sc_reset = TV_SC_RESET_EVERY_8,
577 .pal_burst = false, 577 .pal_burst = true,
578 578
579 .composite_levels = &pal_m_levels_composite, 579 .composite_levels = &pal_m_levels_composite,
580 .composite_color = &pal_m_csc_composite, 580 .composite_color = &pal_m_csc_composite,
@@ -586,7 +586,7 @@ static const struct tv_mode tv_modes[] = {
586 { 586 {
587 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ 587 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
588 .name = "PAL-N", 588 .name = "PAL-N",
589 .clock = 107520, 589 .clock = 108000,
590 .refresh = 25000, 590 .refresh = 25000,
591 .oversample = TV_OVERSAMPLE_8X, 591 .oversample = TV_OVERSAMPLE_8X,
592 .component_only = 0, 592 .component_only = 0,
@@ -615,9 +615,9 @@ static const struct tv_mode tv_modes[] = {
615 615
616 616
617 /* desired 4.4336180 actual 4.4336180 clock 107.52 */ 617 /* desired 4.4336180 actual 4.4336180 clock 107.52 */
618 .dda1_inc = 168, 618 .dda1_inc = 135,
619 .dda2_inc = 18557, .dda2_size = 20625, 619 .dda2_inc = 23578, .dda2_size = 27648,
620 .dda3_inc = 0, .dda3_size = 0, 620 .dda3_inc = 134, .dda3_size = 625,
621 .sc_reset = TV_SC_RESET_EVERY_8, 621 .sc_reset = TV_SC_RESET_EVERY_8,
622 .pal_burst = true, 622 .pal_burst = true,
623 623
@@ -631,12 +631,12 @@ static const struct tv_mode tv_modes[] = {
631 { 631 {
632 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ 632 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
633 .name = "PAL", 633 .name = "PAL",
634 .clock = 107520, 634 .clock = 108000,
635 .refresh = 25000, 635 .refresh = 25000,
636 .oversample = TV_OVERSAMPLE_8X, 636 .oversample = TV_OVERSAMPLE_8X,
637 .component_only = 0, 637 .component_only = 0,
638 638
639 .hsync_end = 64, .hblank_end = 128, 639 .hsync_end = 64, .hblank_end = 142,
640 .hblank_start = 844, .htotal = 863, 640 .hblank_start = 844, .htotal = 863,
641 641
642 .progressive = false, .trilevel_sync = false, 642 .progressive = false, .trilevel_sync = false,
@@ -659,8 +659,8 @@ static const struct tv_mode tv_modes[] = {
659 659
660 /* desired 4.4336180 actual 4.4336180 clock 107.52 */ 660 /* desired 4.4336180 actual 4.4336180 clock 107.52 */
661 .dda1_inc = 168, 661 .dda1_inc = 168,
662 .dda2_inc = 18557, .dda2_size = 20625, 662 .dda2_inc = 4122, .dda2_size = 27648,
663 .dda3_inc = 0, .dda3_size = 0, 663 .dda3_inc = 67, .dda3_size = 625,
664 .sc_reset = TV_SC_RESET_EVERY_8, 664 .sc_reset = TV_SC_RESET_EVERY_8,
665 .pal_burst = true, 665 .pal_burst = true,
666 666
@@ -689,7 +689,7 @@ static const struct tv_mode tv_modes[] = {
689 .veq_ena = false, 689 .veq_ena = false,
690 690
691 .vi_end_f1 = 44, .vi_end_f2 = 44, 691 .vi_end_f1 = 44, .vi_end_f2 = 44,
692 .nbr_end = 496, 692 .nbr_end = 479,
693 693
694 .burst_ena = false, 694 .burst_ena = false,
695 695
@@ -713,7 +713,7 @@ static const struct tv_mode tv_modes[] = {
713 .veq_ena = false, 713 .veq_ena = false,
714 714
715 .vi_end_f1 = 44, .vi_end_f2 = 44, 715 .vi_end_f1 = 44, .vi_end_f2 = 44,
716 .nbr_end = 496, 716 .nbr_end = 479,
717 717
718 .burst_ena = false, 718 .burst_ena = false,
719 719
@@ -876,7 +876,7 @@ static const struct tv_mode tv_modes[] = {
876 .component_only = 1, 876 .component_only = 1,
877 877
878 .hsync_end = 88, .hblank_end = 235, 878 .hsync_end = 88, .hblank_end = 235,
879 .hblank_start = 2155, .htotal = 2200, 879 .hblank_start = 2155, .htotal = 2201,
880 880
881 .progressive = false, .trilevel_sync = true, 881 .progressive = false, .trilevel_sync = true,
882 882
@@ -1082,7 +1082,7 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo
1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
1083 1083
1084 /* Ensure TV refresh is close to desired refresh */ 1084 /* Ensure TV refresh is close to desired refresh */
1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 1) 1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10)
1086 return MODE_OK; 1086 return MODE_OK;
1087 return MODE_CLOCK_RANGE; 1087 return MODE_CLOCK_RANGE;
1088} 1088}
@@ -1135,7 +1135,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1135 if (!tv_mode) 1135 if (!tv_mode)
1136 return; /* can't happen (mode_prepare prevents this) */ 1136 return; /* can't happen (mode_prepare prevents this) */
1137 1137
1138 tv_ctl = 0; 1138 tv_ctl = I915_READ(TV_CTL);
1139 tv_ctl &= TV_CTL_SAVE;
1139 1140
1140 switch (tv_priv->type) { 1141 switch (tv_priv->type) {
1141 default: 1142 default:
@@ -1215,7 +1216,6 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1215 /* dda1 implies valid video levels */ 1216 /* dda1 implies valid video levels */
1216 if (tv_mode->dda1_inc) { 1217 if (tv_mode->dda1_inc) {
1217 scctl1 |= TV_SC_DDA1_EN; 1218 scctl1 |= TV_SC_DDA1_EN;
1218 scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
1219 } 1219 }
1220 1220
1221 if (tv_mode->dda2_inc) 1221 if (tv_mode->dda2_inc)
@@ -1225,6 +1225,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1225 scctl1 |= TV_SC_DDA3_EN; 1225 scctl1 |= TV_SC_DDA3_EN;
1226 1226
1227 scctl1 |= tv_mode->sc_reset; 1227 scctl1 |= tv_mode->sc_reset;
1228 scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
1228 scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; 1229 scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
1229 1230
1230 scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | 1231 scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1266,7 +1267,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1266 color_conversion->av); 1267 color_conversion->av);
1267 } 1268 }
1268 1269
1269 I915_WRITE(TV_CLR_KNOBS, 0x00606000); 1270 if (IS_I965G(dev))
1271 I915_WRITE(TV_CLR_KNOBS, 0x00404000);
1272 else
1273 I915_WRITE(TV_CLR_KNOBS, 0x00606000);
1274
1270 if (video_levels) 1275 if (video_levels)
1271 I915_WRITE(TV_CLR_LEVEL, 1276 I915_WRITE(TV_CLR_LEVEL,
1272 ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | 1277 ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
@@ -1401,6 +1406,7 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
1401 tv_dac = I915_READ(TV_DAC); 1406 tv_dac = I915_READ(TV_DAC);
1402 I915_WRITE(TV_DAC, save_tv_dac); 1407 I915_WRITE(TV_DAC, save_tv_dac);
1403 I915_WRITE(TV_CTL, save_tv_ctl); 1408 I915_WRITE(TV_CTL, save_tv_ctl);
1409 intel_wait_for_vblank(dev);
1404 } 1410 }
1405 /* 1411 /*
1406 * A B C 1412 * A B C
@@ -1451,7 +1457,7 @@ intel_tv_detect(struct drm_connector *connector)
1451 mode = reported_modes[0]; 1457 mode = reported_modes[0];
1452 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); 1458 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
1453 1459
1454 if (encoder->crtc) { 1460 if (encoder->crtc && encoder->crtc->enabled) {
1455 type = intel_tv_detect_type(encoder->crtc, intel_output); 1461 type = intel_tv_detect_type(encoder->crtc, intel_output);
1456 } else { 1462 } else {
1457 crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); 1463 crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode);
@@ -1462,6 +1468,8 @@ intel_tv_detect(struct drm_connector *connector)
1462 type = -1; 1468 type = -1;
1463 } 1469 }
1464 1470
1471 tv_priv->type = type;
1472
1465 if (type < 0) 1473 if (type < 0)
1466 return connector_status_disconnected; 1474 return connector_status_disconnected;
1467 1475
@@ -1495,7 +1503,8 @@ intel_tv_get_modes(struct drm_connector *connector)
1495 struct drm_display_mode *mode_ptr; 1503 struct drm_display_mode *mode_ptr;
1496 struct intel_output *intel_output = to_intel_output(connector); 1504 struct intel_output *intel_output = to_intel_output(connector);
1497 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1505 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
1498 int j; 1506 int j, count = 0;
1507 u64 tmp;
1499 1508
1500 for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]); 1509 for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]);
1501 j++) { 1510 j++) {
@@ -1510,8 +1519,9 @@ intel_tv_get_modes(struct drm_connector *connector)
1510 && !tv_mode->component_only)) 1519 && !tv_mode->component_only))
1511 continue; 1520 continue;
1512 1521
1513 mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode), 1522 mode_ptr = drm_mode_create(connector->dev);
1514 DRM_MEM_DRIVER); 1523 if (!mode_ptr)
1524 continue;
1515 strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); 1525 strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
1516 1526
1517 mode_ptr->hdisplay = hactive_s; 1527 mode_ptr->hdisplay = hactive_s;
@@ -1528,15 +1538,17 @@ intel_tv_get_modes(struct drm_connector *connector)
1528 mode_ptr->vsync_end = mode_ptr->vsync_start + 1; 1538 mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
1529 mode_ptr->vtotal = vactive_s + 33; 1539 mode_ptr->vtotal = vactive_s + 33;
1530 1540
1531 mode_ptr->clock = (int) (tv_mode->refresh * 1541 tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
1532 mode_ptr->vtotal * 1542 tmp *= mode_ptr->htotal;
1533 mode_ptr->htotal / 1000) / 1000; 1543 tmp = div_u64(tmp, 1000000);
1544 mode_ptr->clock = (int) tmp;
1534 1545
1535 mode_ptr->type = DRM_MODE_TYPE_DRIVER; 1546 mode_ptr->type = DRM_MODE_TYPE_DRIVER;
1536 drm_mode_probed_add(connector, mode_ptr); 1547 drm_mode_probed_add(connector, mode_ptr);
1548 count++;
1537 } 1549 }
1538 1550
1539 return 0; 1551 return count;
1540} 1552}
1541 1553
1542static void 1554static void
diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c
index 31400c8ae051..d696f69ebce5 100644
--- a/drivers/ieee1394/csr.c
+++ b/drivers/ieee1394/csr.c
@@ -68,22 +68,22 @@ static struct hpsb_highlevel csr_highlevel = {
68 .host_reset = host_reset, 68 .host_reset = host_reset,
69}; 69};
70 70
71const static struct hpsb_address_ops map_ops = { 71static const struct hpsb_address_ops map_ops = {
72 .read = read_maps, 72 .read = read_maps,
73}; 73};
74 74
75const static struct hpsb_address_ops fcp_ops = { 75static const struct hpsb_address_ops fcp_ops = {
76 .write = write_fcp, 76 .write = write_fcp,
77}; 77};
78 78
79const static struct hpsb_address_ops reg_ops = { 79static const struct hpsb_address_ops reg_ops = {
80 .read = read_regs, 80 .read = read_regs,
81 .write = write_regs, 81 .write = write_regs,
82 .lock = lock_regs, 82 .lock = lock_regs,
83 .lock64 = lock64_regs, 83 .lock64 = lock64_regs,
84}; 84};
85 85
86const static struct hpsb_address_ops config_rom_ops = { 86static const struct hpsb_address_ops config_rom_ops = {
87 .read = read_config_rom, 87 .read = read_config_rom,
88}; 88};
89 89
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index cb15bfa38d70..823a6297a1af 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -2171,7 +2171,7 @@ static const struct file_operations dv1394_fops=
2171 * Export information about protocols/devices supported by this driver. 2171 * Export information about protocols/devices supported by this driver.
2172 */ 2172 */
2173#ifdef MODULE 2173#ifdef MODULE
2174static struct ieee1394_device_id dv1394_id_table[] = { 2174static const struct ieee1394_device_id dv1394_id_table[] = {
2175 { 2175 {
2176 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, 2176 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2177 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, 2177 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 1a919df809f8..4ca103577c0a 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -181,7 +181,7 @@ static void ether1394_remove_host(struct hpsb_host *host);
181static void ether1394_host_reset(struct hpsb_host *host); 181static void ether1394_host_reset(struct hpsb_host *host);
182 182
183/* Function for incoming 1394 packets */ 183/* Function for incoming 1394 packets */
184const static struct hpsb_address_ops addr_ops = { 184static const struct hpsb_address_ops addr_ops = {
185 .write = ether1394_write, 185 .write = ether1394_write,
186}; 186};
187 187
@@ -438,7 +438,7 @@ static int eth1394_update(struct unit_directory *ud)
438 return eth1394_new_node(hi, ud); 438 return eth1394_new_node(hi, ud);
439} 439}
440 440
441static struct ieee1394_device_id eth1394_id_table[] = { 441static const struct ieee1394_device_id eth1394_id_table[] = {
442 { 442 {
443 .match_flags = (IEEE1394_MATCH_SPECIFIER_ID | 443 .match_flags = (IEEE1394_MATCH_SPECIFIER_ID |
444 IEEE1394_MATCH_VERSION), 444 IEEE1394_MATCH_VERSION),
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index 600e391c8fe7..4bc443546e04 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -478,7 +478,7 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
478 return retval; 478 return retval;
479} 479}
480 480
481const static struct hpsb_address_ops dummy_ops; 481static const struct hpsb_address_ops dummy_ops;
482 482
483/* dummy address spaces as lower and upper bounds of the host's a.s. list */ 483/* dummy address spaces as lower and upper bounds of the host's a.s. list */
484static void init_hpsb_highlevel(struct hpsb_host *host) 484static void init_hpsb_highlevel(struct hpsb_host *host)
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 53aada5bbe1e..a6d55bebe61a 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -484,7 +484,7 @@ static struct device_attribute *const fw_host_attrs[] = {
484static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf) 484static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf)
485{ 485{
486 struct hpsb_protocol_driver *driver; 486 struct hpsb_protocol_driver *driver;
487 struct ieee1394_device_id *id; 487 const struct ieee1394_device_id *id;
488 int length = 0; 488 int length = 0;
489 char *scratch = buf; 489 char *scratch = buf;
490 490
@@ -658,7 +658,7 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
658{ 658{
659 struct hpsb_protocol_driver *driver; 659 struct hpsb_protocol_driver *driver;
660 struct unit_directory *ud; 660 struct unit_directory *ud;
661 struct ieee1394_device_id *id; 661 const struct ieee1394_device_id *id;
662 662
663 /* We only match unit directories */ 663 /* We only match unit directories */
664 if (dev->platform_data != &nodemgr_ud_platform_data) 664 if (dev->platform_data != &nodemgr_ud_platform_data)
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index ee5acdbd114a..749b271d3107 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -125,7 +125,7 @@ struct hpsb_protocol_driver {
125 * probe function below can implement further protocol 125 * probe function below can implement further protocol
126 * dependent or vendor dependent checking. 126 * dependent or vendor dependent checking.
127 */ 127 */
128 struct ieee1394_device_id *id_table; 128 const struct ieee1394_device_id *id_table;
129 129
130 /* 130 /*
131 * The update function is called when the node has just 131 * The update function is called when the node has just
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index bad66c65b0d6..da5f8829b503 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -90,7 +90,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
90static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, 90static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
91 u64 addr, octlet_t data, octlet_t arg, int ext_tcode, 91 u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
92 u16 flags); 92 u16 flags);
93const static struct hpsb_address_ops arm_ops = { 93static const struct hpsb_address_ops arm_ops = {
94 .read = arm_read, 94 .read = arm_read,
95 .write = arm_write, 95 .write = arm_write,
96 .lock = arm_lock, 96 .lock = arm_lock,
@@ -369,6 +369,7 @@ static const char __user *raw1394_compat_write(const char __user *buf)
369{ 369{
370 struct compat_raw1394_req __user *cr = (typeof(cr)) buf; 370 struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
371 struct raw1394_request __user *r; 371 struct raw1394_request __user *r;
372
372 r = compat_alloc_user_space(sizeof(struct raw1394_request)); 373 r = compat_alloc_user_space(sizeof(struct raw1394_request));
373 374
374#define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x)) 375#define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x))
@@ -378,7 +379,8 @@ static const char __user *raw1394_compat_write(const char __user *buf)
378 C(tag) || 379 C(tag) ||
379 C(sendb) || 380 C(sendb) ||
380 C(recvb)) 381 C(recvb))
381 return ERR_PTR(-EFAULT); 382 return (__force const char __user *)ERR_PTR(-EFAULT);
383
382 return (const char __user *)r; 384 return (const char __user *)r;
383} 385}
384#undef C 386#undef C
@@ -389,6 +391,7 @@ static int
389raw1394_compat_read(const char __user *buf, struct raw1394_request *r) 391raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
390{ 392{
391 struct compat_raw1394_req __user *cr = (typeof(cr)) buf; 393 struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
394
392 if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) || 395 if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) ||
393 P(type) || 396 P(type) ||
394 P(error) || 397 P(error) ||
@@ -400,6 +403,7 @@ raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
400 P(sendb) || 403 P(sendb) ||
401 P(recvb)) 404 P(recvb))
402 return -EFAULT; 405 return -EFAULT;
406
403 return sizeof(struct compat_raw1394_req); 407 return sizeof(struct compat_raw1394_req);
404} 408}
405#undef P 409#undef P
@@ -2249,8 +2253,8 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer,
2249 sizeof(struct compat_raw1394_req) != 2253 sizeof(struct compat_raw1394_req) !=
2250 sizeof(struct raw1394_request)) { 2254 sizeof(struct raw1394_request)) {
2251 buffer = raw1394_compat_write(buffer); 2255 buffer = raw1394_compat_write(buffer);
2252 if (IS_ERR(buffer)) 2256 if (IS_ERR((__force void *)buffer))
2253 return PTR_ERR(buffer); 2257 return PTR_ERR((__force void *)buffer);
2254 } else 2258 } else
2255#endif 2259#endif
2256 if (count != sizeof(struct raw1394_request)) { 2260 if (count != sizeof(struct raw1394_request)) {
@@ -2978,7 +2982,7 @@ static int raw1394_release(struct inode *inode, struct file *file)
2978 * Export information about protocols/devices supported by this driver. 2982 * Export information about protocols/devices supported by this driver.
2979 */ 2983 */
2980#ifdef MODULE 2984#ifdef MODULE
2981static struct ieee1394_device_id raw1394_id_table[] = { 2985static const struct ieee1394_device_id raw1394_id_table[] = {
2982 { 2986 {
2983 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, 2987 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2984 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, 2988 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f3fd8657ce4b..a51ab233342d 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -265,7 +265,7 @@ static struct hpsb_highlevel sbp2_highlevel = {
265 .host_reset = sbp2_host_reset, 265 .host_reset = sbp2_host_reset,
266}; 266};
267 267
268const static struct hpsb_address_ops sbp2_ops = { 268static const struct hpsb_address_ops sbp2_ops = {
269 .write = sbp2_handle_status_write 269 .write = sbp2_handle_status_write
270}; 270};
271 271
@@ -275,7 +275,7 @@ static int sbp2_handle_physdma_write(struct hpsb_host *, int, int, quadlet_t *,
275static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64, 275static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64,
276 size_t, u16); 276 size_t, u16);
277 277
278const static struct hpsb_address_ops sbp2_physdma_ops = { 278static const struct hpsb_address_ops sbp2_physdma_ops = {
279 .read = sbp2_handle_physdma_read, 279 .read = sbp2_handle_physdma_read,
280 .write = sbp2_handle_physdma_write, 280 .write = sbp2_handle_physdma_write,
281}; 281};
@@ -285,7 +285,7 @@ const static struct hpsb_address_ops sbp2_physdma_ops = {
285/* 285/*
286 * Interface to driver core and IEEE 1394 core 286 * Interface to driver core and IEEE 1394 core
287 */ 287 */
288static struct ieee1394_device_id sbp2_id_table[] = { 288static const struct ieee1394_device_id sbp2_id_table[] = {
289 { 289 {
290 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, 290 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
291 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff, 291 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
@@ -1413,8 +1413,7 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
1413 "(firmware_revision 0x%06x, vendor_id 0x%06x," 1413 "(firmware_revision 0x%06x, vendor_id 0x%06x,"
1414 " model_id 0x%06x)", 1414 " model_id 0x%06x)",
1415 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), 1415 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
1416 workarounds, firmware_revision, 1416 workarounds, firmware_revision, ud->vendor_id,
1417 ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
1418 model); 1417 model);
1419 1418
1420 /* We would need one SCSI host template for each target to adjust 1419 /* We would need one SCSI host template for each target to adjust
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 679a918a5cc7..d287ba79821d 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1294,7 +1294,7 @@ static const struct file_operations video1394_fops=
1294 * Export information about protocols/devices supported by this driver. 1294 * Export information about protocols/devices supported by this driver.
1295 */ 1295 */
1296#ifdef MODULE 1296#ifdef MODULE
1297static struct ieee1394_device_id video1394_id_table[] = { 1297static const struct ieee1394_device_id video1394_id_table[] = {
1298 { 1298 {
1299 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, 1299 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
1300 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, 1300 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 8d3e4c6f237e..ecb1f6fd6276 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1602,8 +1602,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1602 netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128); 1602 netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128);
1603 nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n"); 1603 nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
1604 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1604 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1605 netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
1606 netdev->features |= NETIF_F_LLTX;
1607 1605
1608 /* Fill in the port structure */ 1606 /* Fill in the port structure */
1609 nesvnic->netdev = netdev; 1607 nesvnic->netdev = netdev;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 12876392516e..13d7674b293d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -168,7 +168,7 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
168{ 168{
169 int error = 0; 169 int error = 0;
170 170
171 debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt); 171 iser_dbg("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
172 172
173 error = iser_send_control(conn, task); 173 error = iser_send_control(conn, task);
174 174
@@ -195,7 +195,7 @@ iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
195 /* Send data-out PDUs while there's still unsolicited data to send */ 195 /* Send data-out PDUs while there's still unsolicited data to send */
196 while (iscsi_task_has_unsol_data(task)) { 196 while (iscsi_task_has_unsol_data(task)) {
197 iscsi_prep_data_out_pdu(task, r2t, &hdr); 197 iscsi_prep_data_out_pdu(task, r2t, &hdr);
198 debug_scsi("Sending data-out: itt 0x%x, data count %d\n", 198 iser_dbg("Sending data-out: itt 0x%x, data count %d\n",
199 hdr.itt, r2t->data_count); 199 hdr.itt, r2t->data_count);
200 200
201 /* the buffer description has been passed with the command */ 201 /* the buffer description has been passed with the command */
@@ -206,7 +206,7 @@ iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
206 goto iscsi_iser_task_xmit_unsol_data_exit; 206 goto iscsi_iser_task_xmit_unsol_data_exit;
207 } 207 }
208 r2t->sent += r2t->data_count; 208 r2t->sent += r2t->data_count;
209 debug_scsi("Need to send %d more as data-out PDUs\n", 209 iser_dbg("Need to send %d more as data-out PDUs\n",
210 r2t->data_length - r2t->sent); 210 r2t->data_length - r2t->sent);
211 } 211 }
212 212
@@ -227,12 +227,12 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
227 if (task->sc->sc_data_direction == DMA_TO_DEVICE) { 227 if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
228 BUG_ON(scsi_bufflen(task->sc) == 0); 228 BUG_ON(scsi_bufflen(task->sc) == 0);
229 229
230 debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n", 230 iser_dbg("cmd [itt %x total %d imm %d unsol_data %d\n",
231 task->itt, scsi_bufflen(task->sc), 231 task->itt, scsi_bufflen(task->sc),
232 task->imm_count, task->unsol_r2t.data_length); 232 task->imm_count, task->unsol_r2t.data_length);
233 } 233 }
234 234
235 debug_scsi("task deq [cid %d itt 0x%x]\n", 235 iser_dbg("task deq [cid %d itt 0x%x]\n",
236 conn->id, task->itt); 236 conn->id, task->itt);
237 237
238 /* Send the cmd PDU */ 238 /* Send the cmd PDU */
@@ -397,14 +397,14 @@ static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
397static struct iscsi_cls_session * 397static struct iscsi_cls_session *
398iscsi_iser_session_create(struct iscsi_endpoint *ep, 398iscsi_iser_session_create(struct iscsi_endpoint *ep,
399 uint16_t cmds_max, uint16_t qdepth, 399 uint16_t cmds_max, uint16_t qdepth,
400 uint32_t initial_cmdsn, uint32_t *hostno) 400 uint32_t initial_cmdsn)
401{ 401{
402 struct iscsi_cls_session *cls_session; 402 struct iscsi_cls_session *cls_session;
403 struct iscsi_session *session; 403 struct iscsi_session *session;
404 struct Scsi_Host *shost; 404 struct Scsi_Host *shost;
405 struct iser_conn *ib_conn; 405 struct iser_conn *ib_conn;
406 406
407 shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN); 407 shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 1);
408 if (!shost) 408 if (!shost)
409 return NULL; 409 return NULL;
410 shost->transportt = iscsi_iser_scsi_transport; 410 shost->transportt = iscsi_iser_scsi_transport;
@@ -423,7 +423,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
423 if (iscsi_host_add(shost, 423 if (iscsi_host_add(shost,
424 ep ? ib_conn->device->ib_device->dma_device : NULL)) 424 ep ? ib_conn->device->ib_device->dma_device : NULL))
425 goto free_host; 425 goto free_host;
426 *hostno = shost->host_no;
427 426
428 /* 427 /*
429 * we do not support setting can_queue cmd_per_lun from userspace yet 428 * we do not support setting can_queue cmd_per_lun from userspace yet
@@ -596,7 +595,7 @@ static struct scsi_host_template iscsi_iser_sht = {
596 .change_queue_depth = iscsi_change_queue_depth, 595 .change_queue_depth = iscsi_change_queue_depth,
597 .sg_tablesize = ISCSI_ISER_SG_TABLESIZE, 596 .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
598 .max_sectors = 1024, 597 .max_sectors = 1024,
599 .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN, 598 .cmd_per_lun = ISER_DEF_CMD_PER_LUN,
600 .eh_abort_handler = iscsi_eh_abort, 599 .eh_abort_handler = iscsi_eh_abort,
601 .eh_device_reset_handler= iscsi_eh_device_reset, 600 .eh_device_reset_handler= iscsi_eh_device_reset,
602 .eh_target_reset_handler= iscsi_eh_target_reset, 601 .eh_target_reset_handler= iscsi_eh_target_reset,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 861119593f2b..9d529cae1f0d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -93,7 +93,7 @@
93 93
94 /* support upto 512KB in one RDMA */ 94 /* support upto 512KB in one RDMA */
95#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K) 95#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
96#define ISCSI_ISER_MAX_LUN 256 96#define ISER_DEF_CMD_PER_LUN 128
97 97
98/* QP settings */ 98/* QP settings */
99/* Maximal bounds on received asynchronous PDUs */ 99/* Maximal bounds on received asynchronous PDUs */
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index e209cb8dd948..9de640200ad3 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -661,7 +661,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
661 661
662 if (resume_tx) { 662 if (resume_tx) {
663 iser_dbg("%ld resuming tx\n",jiffies); 663 iser_dbg("%ld resuming tx\n",jiffies);
664 scsi_queue_work(conn->session->host, &conn->xmitwork); 664 iscsi_conn_queue_work(conn);
665 } 665 }
666 666
667 if (tx_desc->type == ISCSI_TX_CONTROL) { 667 if (tx_desc->type == ISCSI_TX_CONTROL) {
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index b55d9ccaf33e..32526f103b59 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -115,7 +115,7 @@ static const char *debug_fcp_ctype(unsigned int ctype)
115} 115}
116 116
117static const char *debug_fcp_opcode(unsigned int opcode, 117static const char *debug_fcp_opcode(unsigned int opcode,
118 const u8 *data, size_t length) 118 const u8 *data, int length)
119{ 119{
120 switch (opcode) { 120 switch (opcode) {
121 case AVC_OPCODE_VENDOR: break; 121 case AVC_OPCODE_VENDOR: break;
@@ -135,13 +135,14 @@ static const char *debug_fcp_opcode(unsigned int opcode,
135 case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL: return "RegisterRC"; 135 case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL: return "RegisterRC";
136 case SFE_VENDOR_OPCODE_LNB_CONTROL: return "LNBControl"; 136 case SFE_VENDOR_OPCODE_LNB_CONTROL: return "LNBControl";
137 case SFE_VENDOR_OPCODE_TUNE_QPSK: return "TuneQPSK"; 137 case SFE_VENDOR_OPCODE_TUNE_QPSK: return "TuneQPSK";
138 case SFE_VENDOR_OPCODE_TUNE_QPSK2: return "TuneQPSK2";
138 case SFE_VENDOR_OPCODE_HOST2CA: return "Host2CA"; 139 case SFE_VENDOR_OPCODE_HOST2CA: return "Host2CA";
139 case SFE_VENDOR_OPCODE_CA2HOST: return "CA2Host"; 140 case SFE_VENDOR_OPCODE_CA2HOST: return "CA2Host";
140 } 141 }
141 return "Vendor"; 142 return "Vendor";
142} 143}
143 144
144static void debug_fcp(const u8 *data, size_t length) 145static void debug_fcp(const u8 *data, int length)
145{ 146{
146 unsigned int subunit_type, subunit_id, op; 147 unsigned int subunit_type, subunit_id, op;
147 const char *prefix = data[0] > 7 ? "FCP <- " : "FCP -> "; 148 const char *prefix = data[0] > 7 ? "FCP <- " : "FCP -> ";
@@ -266,7 +267,10 @@ static void avc_tuner_tuneqpsk(struct firedtv *fdtv,
266 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; 267 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
267 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; 268 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
268 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; 269 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
269 c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK; 270 if (fdtv->type == FIREDTV_DVB_S2)
271 c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK2;
272 else
273 c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK;
270 274
271 c->operand[4] = (params->frequency >> 24) & 0xff; 275 c->operand[4] = (params->frequency >> 24) & 0xff;
272 c->operand[5] = (params->frequency >> 16) & 0xff; 276 c->operand[5] = (params->frequency >> 16) & 0xff;
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 00d46e137b2a..92285d0089c2 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -81,13 +81,16 @@ static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags,
81 81
82 /* go */ 82 /* go */
83 sb->s_flags |= MS_ACTIVE; 83 sb->s_flags |= MS_ACTIVE;
84 return simple_set_mnt(mnt, sb); 84 simple_set_mnt(mnt, sb);
85
86 return 0;
85 87
86 /* new mountpoint for an already mounted superblock */ 88 /* new mountpoint for an already mounted superblock */
87already_mounted: 89already_mounted:
88 DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n", 90 DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n",
89 mtd->index, mtd->name); 91 mtd->index, mtd->name);
90 ret = simple_set_mnt(mnt, sb); 92 simple_set_mnt(mnt, sb);
93 ret = 0;
91 goto out_put; 94 goto out_put;
92 95
93out_error: 96out_error:
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 5b91a85fe107..4f08bd995836 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -353,9 +353,6 @@ el2_probe1(struct net_device *dev, int ioaddr)
353 353
354 dev->netdev_ops = &el2_netdev_ops; 354 dev->netdev_ops = &el2_netdev_ops;
355 dev->ethtool_ops = &netdev_ethtool_ops; 355 dev->ethtool_ops = &netdev_ethtool_ops;
356#ifdef CONFIG_NET_POLL_CONTROLLER
357 dev->poll_controller = eip_poll;
358#endif
359 356
360 retval = register_netdev(dev); 357 retval = register_netdev(dev);
361 if (retval) 358 if (retval)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e5ffc1c606c1..f062b424704e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -972,6 +972,14 @@ config ENC28J60_WRITEVERIFY
972 Enable the verify after the buffer write useful for debugging purpose. 972 Enable the verify after the buffer write useful for debugging purpose.
973 If unsure, say N. 973 If unsure, say N.
974 974
975config ETHOC
976 tristate "OpenCores 10/100 Mbps Ethernet MAC support"
977 depends on NET_ETHERNET
978 select MII
979 select PHYLIB
980 help
981 Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
982
975config SMC911X 983config SMC911X
976 tristate "SMSC LAN911[5678] support" 984 tristate "SMSC LAN911[5678] support"
977 select CRC32 985 select CRC32
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 758ecdf4c820..98409c9dd445 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -230,6 +230,7 @@ obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
230pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o 230pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
231obj-$(CONFIG_MLX4_CORE) += mlx4/ 231obj-$(CONFIG_MLX4_CORE) += mlx4/
232obj-$(CONFIG_ENC28J60) += enc28j60.o 232obj-$(CONFIG_ENC28J60) += enc28j60.o
233obj-$(CONFIG_ETHOC) += ethoc.o
233 234
234obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o 235obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
235 236
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 071a851a2ea1..eac73382c087 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -143,6 +143,22 @@ out:
143} 143}
144#endif 144#endif
145 145
146static const struct net_device_ops ac_netdev_ops = {
147 .ndo_open = ac_open,
148 .ndo_stop = ac_close_card,
149
150 .ndo_start_xmit = ei_start_xmit,
151 .ndo_tx_timeout = ei_tx_timeout,
152 .ndo_get_stats = ei_get_stats,
153 .ndo_set_multicast_list = ei_set_multicast_list,
154 .ndo_validate_addr = eth_validate_addr,
155 .ndo_set_mac_address = eth_mac_addr,
156 .ndo_change_mtu = eth_change_mtu,
157#ifdef CONFIG_NET_POLL_CONTROLLER
158 .ndo_poll_controller = ei_poll,
159#endif
160};
161
146static int __init ac_probe1(int ioaddr, struct net_device *dev) 162static int __init ac_probe1(int ioaddr, struct net_device *dev)
147{ 163{
148 int i, retval; 164 int i, retval;
@@ -253,11 +269,7 @@ static int __init ac_probe1(int ioaddr, struct net_device *dev)
253 ei_status.block_output = &ac_block_output; 269 ei_status.block_output = &ac_block_output;
254 ei_status.get_8390_hdr = &ac_get_8390_hdr; 270 ei_status.get_8390_hdr = &ac_get_8390_hdr;
255 271
256 dev->open = &ac_open; 272 dev->netdev_ops = &ac_netdev_ops;
257 dev->stop = &ac_close_card;
258#ifdef CONFIG_NET_POLL_CONTROLLER
259 dev->poll_controller = ei_poll;
260#endif
261 NS8390_init(dev, 0); 273 NS8390_init(dev, 0);
262 274
263 retval = register_netdev(dev); 275 retval = register_netdev(dev);
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 54819a34ba0a..7f8325419803 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -171,7 +171,6 @@ static unsigned int cops_debug = COPS_DEBUG;
171 171
172struct cops_local 172struct cops_local
173{ 173{
174 struct net_device_stats stats;
175 int board; /* Holds what board type is. */ 174 int board; /* Holds what board type is. */
176 int nodeid; /* Set to 1 once have nodeid. */ 175 int nodeid; /* Set to 1 once have nodeid. */
177 unsigned char node_acquire; /* Node ID when acquired. */ 176 unsigned char node_acquire; /* Node ID when acquired. */
@@ -197,7 +196,6 @@ static int cops_send_packet (struct sk_buff *skb, struct net_device *dev);
197static void set_multicast_list (struct net_device *dev); 196static void set_multicast_list (struct net_device *dev);
198static int cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); 197static int cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
199static int cops_close (struct net_device *dev); 198static int cops_close (struct net_device *dev);
200static struct net_device_stats *cops_get_stats (struct net_device *dev);
201 199
202static void cleanup_card(struct net_device *dev) 200static void cleanup_card(struct net_device *dev)
203{ 201{
@@ -260,6 +258,15 @@ out:
260 return ERR_PTR(err); 258 return ERR_PTR(err);
261} 259}
262 260
261static const struct net_device_ops cops_netdev_ops = {
262 .ndo_open = cops_open,
263 .ndo_stop = cops_close,
264 .ndo_start_xmit = cops_send_packet,
265 .ndo_tx_timeout = cops_timeout,
266 .ndo_do_ioctl = cops_ioctl,
267 .ndo_set_multicast_list = set_multicast_list,
268};
269
263/* 270/*
264 * This is the real probe routine. Linux has a history of friendly device 271 * This is the real probe routine. Linux has a history of friendly device
265 * probes on the ISA bus. A good device probes avoids doing writes, and 272 * probes on the ISA bus. A good device probes avoids doing writes, and
@@ -333,16 +340,9 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
333 /* Copy local board variable to lp struct. */ 340 /* Copy local board variable to lp struct. */
334 lp->board = board; 341 lp->board = board;
335 342
336 dev->hard_start_xmit = cops_send_packet; 343 dev->netdev_ops = &cops_netdev_ops;
337 dev->tx_timeout = cops_timeout;
338 dev->watchdog_timeo = HZ * 2; 344 dev->watchdog_timeo = HZ * 2;
339 345
340 dev->get_stats = cops_get_stats;
341 dev->open = cops_open;
342 dev->stop = cops_close;
343 dev->do_ioctl = cops_ioctl;
344 dev->set_multicast_list = set_multicast_list;
345 dev->mc_list = NULL;
346 346
347 /* Tell the user where the card is and what mode we're in. */ 347 /* Tell the user where the card is and what mode we're in. */
348 if(board==DAYNA) 348 if(board==DAYNA)
@@ -797,7 +797,7 @@ static void cops_rx(struct net_device *dev)
797 { 797 {
798 printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", 798 printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n",
799 dev->name); 799 dev->name);
800 lp->stats.rx_dropped++; 800 dev->stats.rx_dropped++;
801 while(pkt_len--) /* Discard packet */ 801 while(pkt_len--) /* Discard packet */
802 inb(ioaddr); 802 inb(ioaddr);
803 spin_unlock_irqrestore(&lp->lock, flags); 803 spin_unlock_irqrestore(&lp->lock, flags);
@@ -819,7 +819,7 @@ static void cops_rx(struct net_device *dev)
819 { 819 {
820 printk(KERN_WARNING "%s: Bad packet length of %d bytes.\n", 820 printk(KERN_WARNING "%s: Bad packet length of %d bytes.\n",
821 dev->name, pkt_len); 821 dev->name, pkt_len);
822 lp->stats.tx_errors++; 822 dev->stats.tx_errors++;
823 dev_kfree_skb_any(skb); 823 dev_kfree_skb_any(skb);
824 return; 824 return;
825 } 825 }
@@ -836,7 +836,7 @@ static void cops_rx(struct net_device *dev)
836 if(rsp_type != LAP_RESPONSE) 836 if(rsp_type != LAP_RESPONSE)
837 { 837 {
838 printk(KERN_WARNING "%s: Bad packet type %d.\n", dev->name, rsp_type); 838 printk(KERN_WARNING "%s: Bad packet type %d.\n", dev->name, rsp_type);
839 lp->stats.tx_errors++; 839 dev->stats.tx_errors++;
840 dev_kfree_skb_any(skb); 840 dev_kfree_skb_any(skb);
841 return; 841 return;
842 } 842 }
@@ -846,8 +846,8 @@ static void cops_rx(struct net_device *dev)
846 skb_reset_transport_header(skb); /* Point to data (Skip header). */ 846 skb_reset_transport_header(skb); /* Point to data (Skip header). */
847 847
848 /* Update the counters. */ 848 /* Update the counters. */
849 lp->stats.rx_packets++; 849 dev->stats.rx_packets++;
850 lp->stats.rx_bytes += skb->len; 850 dev->stats.rx_bytes += skb->len;
851 851
852 /* Send packet to a higher place. */ 852 /* Send packet to a higher place. */
853 netif_rx(skb); 853 netif_rx(skb);
@@ -858,7 +858,7 @@ static void cops_timeout(struct net_device *dev)
858 struct cops_local *lp = netdev_priv(dev); 858 struct cops_local *lp = netdev_priv(dev);
859 int ioaddr = dev->base_addr; 859 int ioaddr = dev->base_addr;
860 860
861 lp->stats.tx_errors++; 861 dev->stats.tx_errors++;
862 if(lp->board==TANGENT) 862 if(lp->board==TANGENT)
863 { 863 {
864 if((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0) 864 if((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
@@ -916,8 +916,8 @@ static int cops_send_packet(struct sk_buff *skb, struct net_device *dev)
916 spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */ 916 spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */
917 917
918 /* Done sending packet, update counters and cleanup. */ 918 /* Done sending packet, update counters and cleanup. */
919 lp->stats.tx_packets++; 919 dev->stats.tx_packets++;
920 lp->stats.tx_bytes += skb->len; 920 dev->stats.tx_bytes += skb->len;
921 dev->trans_start = jiffies; 921 dev->trans_start = jiffies;
922 dev_kfree_skb (skb); 922 dev_kfree_skb (skb);
923 return 0; 923 return 0;
@@ -986,15 +986,6 @@ static int cops_close(struct net_device *dev)
986 return 0; 986 return 0;
987} 987}
988 988
989/*
990 * Get the current statistics.
991 * This may be called with the card open or closed.
992 */
993static struct net_device_stats *cops_get_stats(struct net_device *dev)
994{
995 struct cops_local *lp = netdev_priv(dev);
996 return &lp->stats;
997}
998 989
999#ifdef MODULE 990#ifdef MODULE
1000static struct net_device *cops_dev; 991static struct net_device *cops_dev;
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index dc4d49605603..78cc71469136 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -261,7 +261,6 @@ static unsigned char *ltdmacbuf;
261 261
262struct ltpc_private 262struct ltpc_private
263{ 263{
264 struct net_device_stats stats;
265 struct atalk_addr my_addr; 264 struct atalk_addr my_addr;
266}; 265};
267 266
@@ -699,7 +698,6 @@ static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
699static struct timer_list ltpc_timer; 698static struct timer_list ltpc_timer;
700 699
701static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev); 700static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev);
702static struct net_device_stats *ltpc_get_stats(struct net_device *dev);
703 701
704static int read_30 ( struct net_device *dev) 702static int read_30 ( struct net_device *dev)
705{ 703{
@@ -726,8 +724,6 @@ static int sendup_buffer (struct net_device *dev)
726 int dnode, snode, llaptype, len; 724 int dnode, snode, llaptype, len;
727 int sklen; 725 int sklen;
728 struct sk_buff *skb; 726 struct sk_buff *skb;
729 struct ltpc_private *ltpc_priv = netdev_priv(dev);
730 struct net_device_stats *stats = &ltpc_priv->stats;
731 struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf; 727 struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;
732 728
733 if (ltc->command != LT_RCVLAP) { 729 if (ltc->command != LT_RCVLAP) {
@@ -779,8 +775,8 @@ static int sendup_buffer (struct net_device *dev)
779 775
780 skb_reset_transport_header(skb); 776 skb_reset_transport_header(skb);
781 777
782 stats->rx_packets++; 778 dev->stats.rx_packets++;
783 stats->rx_bytes+=skb->len; 779 dev->stats.rx_bytes += skb->len;
784 780
785 /* toss it onwards */ 781 /* toss it onwards */
786 netif_rx(skb); 782 netif_rx(skb);
@@ -904,10 +900,6 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
904 /* in kernel 1.3.xx, on entry skb->data points to ddp header, 900 /* in kernel 1.3.xx, on entry skb->data points to ddp header,
905 * and skb->len is the length of the ddp data + ddp header 901 * and skb->len is the length of the ddp data + ddp header
906 */ 902 */
907
908 struct ltpc_private *ltpc_priv = netdev_priv(dev);
909 struct net_device_stats *stats = &ltpc_priv->stats;
910
911 int i; 903 int i;
912 struct lt_sendlap cbuf; 904 struct lt_sendlap cbuf;
913 unsigned char *hdr; 905 unsigned char *hdr;
@@ -936,20 +928,13 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
936 printk("\n"); 928 printk("\n");
937 } 929 }
938 930
939 stats->tx_packets++; 931 dev->stats.tx_packets++;
940 stats->tx_bytes+=skb->len; 932 dev->stats.tx_bytes += skb->len;
941 933
942 dev_kfree_skb(skb); 934 dev_kfree_skb(skb);
943 return 0; 935 return 0;
944} 936}
945 937
946static struct net_device_stats *ltpc_get_stats(struct net_device *dev)
947{
948 struct ltpc_private *ltpc_priv = netdev_priv(dev);
949 struct net_device_stats *stats = &ltpc_priv->stats;
950 return stats;
951}
952
953/* initialization stuff */ 938/* initialization stuff */
954 939
955static int __init ltpc_probe_dma(int base, int dma) 940static int __init ltpc_probe_dma(int base, int dma)
@@ -1027,6 +1012,12 @@ static int __init ltpc_probe_dma(int base, int dma)
1027 return (want & 2) ? 3 : 1; 1012 return (want & 2) ? 3 : 1;
1028} 1013}
1029 1014
1015static const struct net_device_ops ltpc_netdev = {
1016 .ndo_start_xmit = ltpc_xmit,
1017 .ndo_do_ioctl = ltpc_ioctl,
1018 .ndo_set_multicast_list = set_multicast_list,
1019};
1020
1030struct net_device * __init ltpc_probe(void) 1021struct net_device * __init ltpc_probe(void)
1031{ 1022{
1032 struct net_device *dev; 1023 struct net_device *dev;
@@ -1133,14 +1124,7 @@ struct net_device * __init ltpc_probe(void)
1133 else 1124 else
1134 printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma); 1125 printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma);
1135 1126
1136 /* Fill in the fields of the device structure with ethernet-generic values. */ 1127 dev->netdev_ops = &ltpc_netdev;
1137 dev->hard_start_xmit = ltpc_xmit;
1138 dev->get_stats = ltpc_get_stats;
1139
1140 /* add the ltpc-specific things */
1141 dev->do_ioctl = &ltpc_ioctl;
1142
1143 dev->set_multicast_list = &set_multicast_list;
1144 dev->mc_list = NULL; 1128 dev->mc_list = NULL;
1145 dev->base_addr = io; 1129 dev->base_addr = io;
1146 dev->irq = irq; 1130 dev->irq = irq;
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index ced70799b898..18b566ad4fd1 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -249,6 +249,17 @@ out:
249 return ERR_PTR(err); 249 return ERR_PTR(err);
250} 250}
251 251
252static const struct net_device_ops at1700_netdev_ops = {
253 .ndo_open = net_open,
254 .ndo_stop = net_close,
255 .ndo_start_xmit = net_send_packet,
256 .ndo_set_multicast_list = set_rx_mode,
257 .ndo_tx_timeout = net_tx_timeout,
258 .ndo_change_mtu = eth_change_mtu,
259 .ndo_set_mac_address = eth_mac_addr,
260 .ndo_validate_addr = eth_validate_addr,
261};
262
252/* The Fujitsu datasheet suggests that the NIC be probed for by checking its 263/* The Fujitsu datasheet suggests that the NIC be probed for by checking its
253 "signature", the default bit pattern after a reset. This *doesn't* work -- 264 "signature", the default bit pattern after a reset. This *doesn't* work --
254 there is no way to reset the bus interface without a complete power-cycle! 265 there is no way to reset the bus interface without a complete power-cycle!
@@ -448,13 +459,7 @@ found:
448 if (net_debug) 459 if (net_debug)
449 printk(version); 460 printk(version);
450 461
451 memset(lp, 0, sizeof(struct net_local)); 462 dev->netdev_ops = &at1700_netdev_ops;
452
453 dev->open = net_open;
454 dev->stop = net_close;
455 dev->hard_start_xmit = net_send_packet;
456 dev->set_multicast_list = &set_rx_mode;
457 dev->tx_timeout = net_tx_timeout;
458 dev->watchdog_timeo = TX_TIMEOUT; 463 dev->watchdog_timeo = TX_TIMEOUT;
459 464
460 spin_lock_init(&lp->lock); 465 spin_lock_init(&lp->lock);
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index f901fee79a20..9b75aa630062 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include "be.h" 18#include "be.h"
19#include <asm/div64.h>
19 20
20MODULE_VERSION(DRV_VER); 21MODULE_VERSION(DRV_VER);
21MODULE_DEVICE_TABLE(pci, be_dev_ids); 22MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -290,6 +291,17 @@ static struct net_device_stats *be_get_stats(struct net_device *dev)
290 return &adapter->stats.net_stats; 291 return &adapter->stats.net_stats;
291} 292}
292 293
294static u32 be_calc_rate(u64 bytes, unsigned long ticks)
295{
296 u64 rate = bytes;
297
298 do_div(rate, ticks / HZ);
299 rate <<= 3; /* bytes/sec -> bits/sec */
300 do_div(rate, 1000000ul); /* MB/Sec */
301
302 return rate;
303}
304
293static void be_tx_rate_update(struct be_adapter *adapter) 305static void be_tx_rate_update(struct be_adapter *adapter)
294{ 306{
295 struct be_drvr_stats *stats = drvr_stats(adapter); 307 struct be_drvr_stats *stats = drvr_stats(adapter);
@@ -303,11 +315,9 @@ static void be_tx_rate_update(struct be_adapter *adapter)
303 315
304 /* Update tx rate once in two seconds */ 316 /* Update tx rate once in two seconds */
305 if ((now - stats->be_tx_jiffies) > 2 * HZ) { 317 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
306 u32 r; 318 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
307 r = (stats->be_tx_bytes - stats->be_tx_bytes_prev) / 319 - stats->be_tx_bytes_prev,
308 ((now - stats->be_tx_jiffies) / HZ); 320 now - stats->be_tx_jiffies);
309 r = r / 1000000; /* M bytes/s */
310 stats->be_tx_rate = r * 8; /* M bits/s */
311 stats->be_tx_jiffies = now; 321 stats->be_tx_jiffies = now;
312 stats->be_tx_bytes_prev = stats->be_tx_bytes; 322 stats->be_tx_bytes_prev = stats->be_tx_bytes;
313 } 323 }
@@ -599,7 +609,6 @@ static void be_rx_rate_update(struct be_adapter *adapter)
599{ 609{
600 struct be_drvr_stats *stats = drvr_stats(adapter); 610 struct be_drvr_stats *stats = drvr_stats(adapter);
601 ulong now = jiffies; 611 ulong now = jiffies;
602 u32 rate;
603 612
604 /* Wrapped around */ 613 /* Wrapped around */
605 if (time_before(now, stats->be_rx_jiffies)) { 614 if (time_before(now, stats->be_rx_jiffies)) {
@@ -611,10 +620,9 @@ static void be_rx_rate_update(struct be_adapter *adapter)
611 if ((now - stats->be_rx_jiffies) < 2 * HZ) 620 if ((now - stats->be_rx_jiffies) < 2 * HZ)
612 return; 621 return;
613 622
614 rate = (stats->be_rx_bytes - stats->be_rx_bytes_prev) / 623 stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
615 ((now - stats->be_rx_jiffies) / HZ); 624 - stats->be_rx_bytes_prev,
616 rate = rate / 1000000; /* MB/Sec */ 625 now - stats->be_rx_jiffies);
617 stats->be_rx_rate = rate * 8; /* Mega Bits/Sec */
618 stats->be_rx_jiffies = now; 626 stats->be_rx_jiffies = now;
619 stats->be_rx_bytes_prev = stats->be_rx_bytes; 627 stats->be_rx_bytes_prev = stats->be_rx_bytes;
620} 628}
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index ff6497658a45..7433b88eed7e 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -501,6 +501,21 @@ static void net_poll_controller(struct net_device *dev)
501} 501}
502#endif 502#endif
503 503
504static const struct net_device_ops net_ops = {
505 .ndo_open = net_open,
506 .ndo_stop = net_close,
507 .ndo_tx_timeout = net_timeout,
508 .ndo_start_xmit = net_send_packet,
509 .ndo_get_stats = net_get_stats,
510 .ndo_set_multicast_list = set_multicast_list,
511 .ndo_set_mac_address = set_mac_address,
512#ifdef CONFIG_NET_POLL_CONTROLLER
513 .ndo_poll_controller = net_poll_controller,
514#endif
515 .ndo_change_mtu = eth_change_mtu,
516 .ndo_validate_addr = eth_validate_addr,
517};
518
504/* This is the real probe routine. Linux has a history of friendly device 519/* This is the real probe routine. Linux has a history of friendly device
505 probes on the ISA bus. A good device probes avoids doing writes, and 520 probes on the ISA bus. A good device probes avoids doing writes, and
506 verifies that the correct device exists and functions. 521 verifies that the correct device exists and functions.
@@ -843,17 +858,8 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
843 /* print the ethernet address. */ 858 /* print the ethernet address. */
844 printk(", MAC %pM", dev->dev_addr); 859 printk(", MAC %pM", dev->dev_addr);
845 860
846 dev->open = net_open; 861 dev->netdev_ops = &net_ops;
847 dev->stop = net_close; 862 dev->watchdog_timeo = HZ;
848 dev->tx_timeout = net_timeout;
849 dev->watchdog_timeo = HZ;
850 dev->hard_start_xmit = net_send_packet;
851 dev->get_stats = net_get_stats;
852 dev->set_multicast_list = set_multicast_list;
853 dev->set_mac_address = set_mac_address;
854#ifdef CONFIG_NET_POLL_CONTROLLER
855 dev->poll_controller = net_poll_controller;
856#endif
857 863
858 printk("\n"); 864 printk("\n");
859 if (net_debug) 865 if (net_debug)
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 71eaa431371d..714df2b675e6 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -85,6 +85,8 @@ struct fl_pg_chunk {
85 struct page *page; 85 struct page *page;
86 void *va; 86 void *va;
87 unsigned int offset; 87 unsigned int offset;
88 u64 *p_cnt;
89 DECLARE_PCI_UNMAP_ADDR(mapping);
88}; 90};
89 91
90struct rx_desc; 92struct rx_desc;
@@ -101,6 +103,7 @@ struct sge_fl { /* SGE per free-buffer list state */
101 struct fl_pg_chunk pg_chunk;/* page chunk cache */ 103 struct fl_pg_chunk pg_chunk;/* page chunk cache */
102 unsigned int use_pages; /* whether FL uses pages or sk_buffs */ 104 unsigned int use_pages; /* whether FL uses pages or sk_buffs */
103 unsigned int order; /* order of page allocations */ 105 unsigned int order; /* order of page allocations */
106 unsigned int alloc_size; /* size of allocated buffer */
104 struct rx_desc *desc; /* address of HW Rx descriptor ring */ 107 struct rx_desc *desc; /* address of HW Rx descriptor ring */
105 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ 108 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
106 dma_addr_t phys_addr; /* physical address of HW ring start */ 109 dma_addr_t phys_addr; /* physical address of HW ring start */
@@ -291,6 +294,7 @@ void t3_os_link_fault_handler(struct adapter *adapter, int port_id);
291 294
292void t3_sge_start(struct adapter *adap); 295void t3_sge_start(struct adapter *adap);
293void t3_sge_stop(struct adapter *adap); 296void t3_sge_stop(struct adapter *adap);
297void t3_start_sge_timers(struct adapter *adap);
294void t3_stop_sge_timers(struct adapter *adap); 298void t3_stop_sge_timers(struct adapter *adap);
295void t3_free_sge_resources(struct adapter *adap); 299void t3_free_sge_resources(struct adapter *adap);
296void t3_sge_err_intr_handler(struct adapter *adapter); 300void t3_sge_err_intr_handler(struct adapter *adapter);
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 9ee021e750c8..e508dc32f3ec 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -191,7 +191,8 @@ struct mdio_ops {
191}; 191};
192 192
193struct adapter_info { 193struct adapter_info {
194 unsigned char nports; /* # of ports */ 194 unsigned char nports0; /* # of ports on channel 0 */
195 unsigned char nports1; /* # of ports on channel 1 */
195 unsigned char phy_base_addr; /* MDIO PHY base address */ 196 unsigned char phy_base_addr; /* MDIO PHY base address */
196 unsigned int gpio_out; /* GPIO output settings */ 197 unsigned int gpio_out; /* GPIO output settings */
197 unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */ 198 unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */
@@ -422,6 +423,7 @@ struct adapter_params {
422 unsigned short b_wnd[NCCTRL_WIN]; 423 unsigned short b_wnd[NCCTRL_WIN];
423 424
424 unsigned int nports; /* # of ethernet ports */ 425 unsigned int nports; /* # of ethernet ports */
426 unsigned int chan_map; /* bitmap of in-use Tx channels */
425 unsigned int stats_update_period; /* MAC stats accumulation period */ 427 unsigned int stats_update_period; /* MAC stats accumulation period */
426 unsigned int linkpoll_period; /* link poll period in 0.1s */ 428 unsigned int linkpoll_period; /* link poll period in 0.1s */
427 unsigned int rev; /* chip revision */ 429 unsigned int rev; /* chip revision */
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index d8be89621bf7..2c2aaa741450 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -602,7 +602,6 @@ static int setup_sge_qsets(struct adapter *adap)
602 &adap->params.sge.qset[qset_idx], ntxq, dev, 602 &adap->params.sge.qset[qset_idx], ntxq, dev,
603 netdev_get_tx_queue(dev, j)); 603 netdev_get_tx_queue(dev, j));
604 if (err) { 604 if (err) {
605 t3_stop_sge_timers(adap);
606 t3_free_sge_resources(adap); 605 t3_free_sge_resources(adap);
607 return err; 606 return err;
608 } 607 }
@@ -1046,6 +1045,8 @@ static int cxgb_up(struct adapter *adap)
1046 setup_rss(adap); 1045 setup_rss(adap);
1047 if (!(adap->flags & NAPI_INIT)) 1046 if (!(adap->flags & NAPI_INIT))
1048 init_napi(adap); 1047 init_napi(adap);
1048
1049 t3_start_sge_timers(adap);
1049 adap->flags |= FULL_INIT_DONE; 1050 adap->flags |= FULL_INIT_DONE;
1050 } 1051 }
1051 1052
@@ -2870,6 +2871,9 @@ static void t3_io_resume(struct pci_dev *pdev)
2870{ 2871{
2871 struct adapter *adapter = pci_get_drvdata(pdev); 2872 struct adapter *adapter = pci_get_drvdata(pdev);
2872 2873
2874 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
2875 t3_read_reg(adapter, A_PCIE_PEX_ERR));
2876
2873 t3_resume_ports(adapter); 2877 t3_resume_ports(adapter);
2874} 2878}
2875 2879
@@ -3002,7 +3006,7 @@ static int __devinit init_one(struct pci_dev *pdev,
3002 static int version_printed; 3006 static int version_printed;
3003 3007
3004 int i, err, pci_using_dac = 0; 3008 int i, err, pci_using_dac = 0;
3005 unsigned long mmio_start, mmio_len; 3009 resource_size_t mmio_start, mmio_len;
3006 const struct adapter_info *ai; 3010 const struct adapter_info *ai;
3007 struct adapter *adapter = NULL; 3011 struct adapter *adapter = NULL;
3008 struct port_info *pi; 3012 struct port_info *pi;
@@ -3082,7 +3086,7 @@ static int __devinit init_one(struct pci_dev *pdev,
3082 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task); 3086 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3083 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task); 3087 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3084 3088
3085 for (i = 0; i < ai->nports; ++i) { 3089 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3086 struct net_device *netdev; 3090 struct net_device *netdev;
3087 3091
3088 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS); 3092 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
@@ -3172,7 +3176,7 @@ static int __devinit init_one(struct pci_dev *pdev,
3172 3176
3173out_free_dev: 3177out_free_dev:
3174 iounmap(adapter->regs); 3178 iounmap(adapter->regs);
3175 for (i = ai->nports - 1; i >= 0; --i) 3179 for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3176 if (adapter->port[i]) 3180 if (adapter->port[i])
3177 free_netdev(adapter->port[i]); 3181 free_netdev(adapter->port[i]);
3178 3182
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a7555cb3fa4a..26d3587f3399 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -50,6 +50,7 @@
50#define SGE_RX_COPY_THRES 256 50#define SGE_RX_COPY_THRES 256
51#define SGE_RX_PULL_LEN 128 51#define SGE_RX_PULL_LEN 128
52 52
53#define SGE_PG_RSVD SMP_CACHE_BYTES
53/* 54/*
54 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks. 55 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
55 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs 56 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
@@ -57,8 +58,10 @@
57 */ 58 */
58#define FL0_PG_CHUNK_SIZE 2048 59#define FL0_PG_CHUNK_SIZE 2048
59#define FL0_PG_ORDER 0 60#define FL0_PG_ORDER 0
61#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
60#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192) 62#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
61#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1) 63#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
64#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
62 65
63#define SGE_RX_DROP_THRES 16 66#define SGE_RX_DROP_THRES 16
64#define RX_RECLAIM_PERIOD (HZ/4) 67#define RX_RECLAIM_PERIOD (HZ/4)
@@ -345,13 +348,21 @@ static inline int should_restart_tx(const struct sge_txq *q)
345 return q->in_use - r < (q->size >> 1); 348 return q->in_use - r < (q->size >> 1);
346} 349}
347 350
348static void clear_rx_desc(const struct sge_fl *q, struct rx_sw_desc *d) 351static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
352 struct rx_sw_desc *d)
349{ 353{
350 if (q->use_pages) { 354 if (q->use_pages && d->pg_chunk.page) {
351 if (d->pg_chunk.page) 355 (*d->pg_chunk.p_cnt)--;
352 put_page(d->pg_chunk.page); 356 if (!*d->pg_chunk.p_cnt)
357 pci_unmap_page(pdev,
358 pci_unmap_addr(&d->pg_chunk, mapping),
359 q->alloc_size, PCI_DMA_FROMDEVICE);
360
361 put_page(d->pg_chunk.page);
353 d->pg_chunk.page = NULL; 362 d->pg_chunk.page = NULL;
354 } else { 363 } else {
364 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
365 q->buf_size, PCI_DMA_FROMDEVICE);
355 kfree_skb(d->skb); 366 kfree_skb(d->skb);
356 d->skb = NULL; 367 d->skb = NULL;
357 } 368 }
@@ -372,9 +383,8 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
372 while (q->credits--) { 383 while (q->credits--) {
373 struct rx_sw_desc *d = &q->sdesc[cidx]; 384 struct rx_sw_desc *d = &q->sdesc[cidx];
374 385
375 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr), 386
376 q->buf_size, PCI_DMA_FROMDEVICE); 387 clear_rx_desc(pdev, q, d);
377 clear_rx_desc(q, d);
378 if (++cidx == q->size) 388 if (++cidx == q->size)
379 cidx = 0; 389 cidx = 0;
380 } 390 }
@@ -417,18 +427,39 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
417 return 0; 427 return 0;
418} 428}
419 429
420static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp, 430static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
431 unsigned int gen)
432{
433 d->addr_lo = cpu_to_be32(mapping);
434 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
435 wmb();
436 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
437 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
438 return 0;
439}
440
441static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
442 struct rx_sw_desc *sd, gfp_t gfp,
421 unsigned int order) 443 unsigned int order)
422{ 444{
423 if (!q->pg_chunk.page) { 445 if (!q->pg_chunk.page) {
446 dma_addr_t mapping;
447
424 q->pg_chunk.page = alloc_pages(gfp, order); 448 q->pg_chunk.page = alloc_pages(gfp, order);
425 if (unlikely(!q->pg_chunk.page)) 449 if (unlikely(!q->pg_chunk.page))
426 return -ENOMEM; 450 return -ENOMEM;
427 q->pg_chunk.va = page_address(q->pg_chunk.page); 451 q->pg_chunk.va = page_address(q->pg_chunk.page);
452 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
453 SGE_PG_RSVD;
428 q->pg_chunk.offset = 0; 454 q->pg_chunk.offset = 0;
455 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
456 0, q->alloc_size, PCI_DMA_FROMDEVICE);
457 pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
429 } 458 }
430 sd->pg_chunk = q->pg_chunk; 459 sd->pg_chunk = q->pg_chunk;
431 460
461 prefetch(sd->pg_chunk.p_cnt);
462
432 q->pg_chunk.offset += q->buf_size; 463 q->pg_chunk.offset += q->buf_size;
433 if (q->pg_chunk.offset == (PAGE_SIZE << order)) 464 if (q->pg_chunk.offset == (PAGE_SIZE << order))
434 q->pg_chunk.page = NULL; 465 q->pg_chunk.page = NULL;
@@ -436,6 +467,12 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
436 q->pg_chunk.va += q->buf_size; 467 q->pg_chunk.va += q->buf_size;
437 get_page(q->pg_chunk.page); 468 get_page(q->pg_chunk.page);
438 } 469 }
470
471 if (sd->pg_chunk.offset == 0)
472 *sd->pg_chunk.p_cnt = 1;
473 else
474 *sd->pg_chunk.p_cnt += 1;
475
439 return 0; 476 return 0;
440} 477}
441 478
@@ -460,35 +497,43 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
460 */ 497 */
461static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) 498static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
462{ 499{
463 void *buf_start;
464 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; 500 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
465 struct rx_desc *d = &q->desc[q->pidx]; 501 struct rx_desc *d = &q->desc[q->pidx];
466 unsigned int count = 0; 502 unsigned int count = 0;
467 503
468 while (n--) { 504 while (n--) {
505 dma_addr_t mapping;
469 int err; 506 int err;
470 507
471 if (q->use_pages) { 508 if (q->use_pages) {
472 if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) { 509 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
510 q->order))) {
473nomem: q->alloc_failed++; 511nomem: q->alloc_failed++;
474 break; 512 break;
475 } 513 }
476 buf_start = sd->pg_chunk.va; 514 mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
515 sd->pg_chunk.offset;
516 pci_unmap_addr_set(sd, dma_addr, mapping);
517
518 add_one_rx_chunk(mapping, d, q->gen);
519 pci_dma_sync_single_for_device(adap->pdev, mapping,
520 q->buf_size - SGE_PG_RSVD,
521 PCI_DMA_FROMDEVICE);
477 } else { 522 } else {
478 struct sk_buff *skb = alloc_skb(q->buf_size, gfp); 523 void *buf_start;
479 524
525 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
480 if (!skb) 526 if (!skb)
481 goto nomem; 527 goto nomem;
482 528
483 sd->skb = skb; 529 sd->skb = skb;
484 buf_start = skb->data; 530 buf_start = skb->data;
485 } 531 err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
486 532 q->gen, adap->pdev);
487 err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen, 533 if (unlikely(err)) {
488 adap->pdev); 534 clear_rx_desc(adap->pdev, q, sd);
489 if (unlikely(err)) { 535 break;
490 clear_rx_desc(q, sd); 536 }
491 break;
492 } 537 }
493 538
494 d++; 539 d++;
@@ -795,19 +840,19 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
795 struct sk_buff *newskb, *skb; 840 struct sk_buff *newskb, *skb;
796 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 841 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
797 842
798 newskb = skb = q->pg_skb; 843 dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
799 844
845 newskb = skb = q->pg_skb;
800 if (!skb && (len <= SGE_RX_COPY_THRES)) { 846 if (!skb && (len <= SGE_RX_COPY_THRES)) {
801 newskb = alloc_skb(len, GFP_ATOMIC); 847 newskb = alloc_skb(len, GFP_ATOMIC);
802 if (likely(newskb != NULL)) { 848 if (likely(newskb != NULL)) {
803 __skb_put(newskb, len); 849 __skb_put(newskb, len);
804 pci_dma_sync_single_for_cpu(adap->pdev, 850 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
805 pci_unmap_addr(sd, dma_addr), len,
806 PCI_DMA_FROMDEVICE); 851 PCI_DMA_FROMDEVICE);
807 memcpy(newskb->data, sd->pg_chunk.va, len); 852 memcpy(newskb->data, sd->pg_chunk.va, len);
808 pci_dma_sync_single_for_device(adap->pdev, 853 pci_dma_sync_single_for_device(adap->pdev, dma_addr,
809 pci_unmap_addr(sd, dma_addr), len, 854 len,
810 PCI_DMA_FROMDEVICE); 855 PCI_DMA_FROMDEVICE);
811 } else if (!drop_thres) 856 } else if (!drop_thres)
812 return NULL; 857 return NULL;
813recycle: 858recycle:
@@ -820,16 +865,25 @@ recycle:
820 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres))) 865 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
821 goto recycle; 866 goto recycle;
822 867
868 prefetch(sd->pg_chunk.p_cnt);
869
823 if (!skb) 870 if (!skb)
824 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC); 871 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
872
825 if (unlikely(!newskb)) { 873 if (unlikely(!newskb)) {
826 if (!drop_thres) 874 if (!drop_thres)
827 return NULL; 875 return NULL;
828 goto recycle; 876 goto recycle;
829 } 877 }
830 878
831 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr), 879 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
832 fl->buf_size, PCI_DMA_FROMDEVICE); 880 PCI_DMA_FROMDEVICE);
881 (*sd->pg_chunk.p_cnt)--;
882 if (!*sd->pg_chunk.p_cnt)
883 pci_unmap_page(adap->pdev,
884 pci_unmap_addr(&sd->pg_chunk, mapping),
885 fl->alloc_size,
886 PCI_DMA_FROMDEVICE);
833 if (!skb) { 887 if (!skb) {
834 __skb_put(newskb, SGE_RX_PULL_LEN); 888 __skb_put(newskb, SGE_RX_PULL_LEN);
835 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN); 889 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
@@ -1089,7 +1143,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1089 struct tx_desc *d = &q->desc[pidx]; 1143 struct tx_desc *d = &q->desc[pidx];
1090 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d; 1144 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1091 1145
1092 cpl->len = htonl(skb->len | 0x80000000); 1146 cpl->len = htonl(skb->len);
1093 cntrl = V_TXPKT_INTF(pi->port_id); 1147 cntrl = V_TXPKT_INTF(pi->port_id);
1094 1148
1095 if (vlan_tx_tag_present(skb) && pi->vlan_grp) 1149 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
@@ -1958,8 +2012,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1958 skb_pull(skb, sizeof(*p) + pad); 2012 skb_pull(skb, sizeof(*p) + pad);
1959 skb->protocol = eth_type_trans(skb, adap->port[p->iff]); 2013 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
1960 pi = netdev_priv(skb->dev); 2014 pi = netdev_priv(skb->dev);
1961 if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && p->csum == htons(0xffff) && 2015 if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid &&
1962 !p->fragment) { 2016 p->csum == htons(0xffff) && !p->fragment) {
1963 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; 2017 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1964 skb->ip_summed = CHECKSUM_UNNECESSARY; 2018 skb->ip_summed = CHECKSUM_UNNECESSARY;
1965 } else 2019 } else
@@ -2034,10 +2088,19 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2034 fl->credits--; 2088 fl->credits--;
2035 2089
2036 len -= offset; 2090 len -= offset;
2037 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr), 2091 pci_dma_sync_single_for_cpu(adap->pdev,
2038 fl->buf_size, PCI_DMA_FROMDEVICE); 2092 pci_unmap_addr(sd, dma_addr),
2093 fl->buf_size - SGE_PG_RSVD,
2094 PCI_DMA_FROMDEVICE);
2039 2095
2040 prefetch(&qs->lro_frag_tbl); 2096 (*sd->pg_chunk.p_cnt)--;
2097 if (!*sd->pg_chunk.p_cnt)
2098 pci_unmap_page(adap->pdev,
2099 pci_unmap_addr(&sd->pg_chunk, mapping),
2100 fl->alloc_size,
2101 PCI_DMA_FROMDEVICE);
2102
2103 prefetch(qs->lro_va);
2041 2104
2042 rx_frag += nr_frags; 2105 rx_frag += nr_frags;
2043 rx_frag->page = sd->pg_chunk.page; 2106 rx_frag->page = sd->pg_chunk.page;
@@ -2047,6 +2110,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2047 qs->lro_frag_tbl.nr_frags++; 2110 qs->lro_frag_tbl.nr_frags++;
2048 qs->lro_frag_tbl.len = frag_len; 2111 qs->lro_frag_tbl.len = frag_len;
2049 2112
2113
2050 if (!complete) 2114 if (!complete)
2051 return; 2115 return;
2052 2116
@@ -2236,6 +2300,8 @@ no_mem:
2236 if (fl->use_pages) { 2300 if (fl->use_pages) {
2237 void *addr = fl->sdesc[fl->cidx].pg_chunk.va; 2301 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2238 2302
2303 prefetch(&qs->lro_frag_tbl);
2304
2239 prefetch(addr); 2305 prefetch(addr);
2240#if L1_CACHE_BYTES < 128 2306#if L1_CACHE_BYTES < 128
2241 prefetch(addr + L1_CACHE_BYTES); 2307 prefetch(addr + L1_CACHE_BYTES);
@@ -2972,21 +3038,23 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2972 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; 3038 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
2973 q->fl[0].order = FL0_PG_ORDER; 3039 q->fl[0].order = FL0_PG_ORDER;
2974 q->fl[1].order = FL1_PG_ORDER; 3040 q->fl[1].order = FL1_PG_ORDER;
3041 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3042 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
2975 3043
2976 spin_lock_irq(&adapter->sge.reg_lock); 3044 spin_lock_irq(&adapter->sge.reg_lock);
2977 3045
2978 /* FL threshold comparison uses < */ 3046 /* FL threshold comparison uses < */
2979 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, 3047 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2980 q->rspq.phys_addr, q->rspq.size, 3048 q->rspq.phys_addr, q->rspq.size,
2981 q->fl[0].buf_size, 1, 0); 3049 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
2982 if (ret) 3050 if (ret)
2983 goto err_unlock; 3051 goto err_unlock;
2984 3052
2985 for (i = 0; i < SGE_RXQ_PER_SET; ++i) { 3053 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2986 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, 3054 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2987 q->fl[i].phys_addr, q->fl[i].size, 3055 q->fl[i].phys_addr, q->fl[i].size,
2988 q->fl[i].buf_size, p->cong_thres, 1, 3056 q->fl[i].buf_size - SGE_PG_RSVD,
2989 0); 3057 p->cong_thres, 1, 0);
2990 if (ret) 3058 if (ret)
2991 goto err_unlock; 3059 goto err_unlock;
2992 } 3060 }
@@ -3044,9 +3112,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
3044 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | 3112 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
3045 V_NEWTIMER(q->rspq.holdoff_tmr)); 3113 V_NEWTIMER(q->rspq.holdoff_tmr));
3046 3114
3047 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
3048 mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3049
3050 return 0; 3115 return 0;
3051 3116
3052err_unlock: 3117err_unlock:
@@ -3057,6 +3122,27 @@ err:
3057} 3122}
3058 3123
3059/** 3124/**
3125 * t3_start_sge_timers - start SGE timer call backs
3126 * @adap: the adapter
3127 *
3128 * Starts each SGE queue set's timer call back
3129 */
3130void t3_start_sge_timers(struct adapter *adap)
3131{
3132 int i;
3133
3134 for (i = 0; i < SGE_QSETS; ++i) {
3135 struct sge_qset *q = &adap->sge.qs[i];
3136
3137 if (q->tx_reclaim_timer.function)
3138 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
3139
3140 if (q->rx_reclaim_timer.function)
3141 mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3142 }
3143}
3144
3145/**
3060 * t3_stop_sge_timers - stop SGE timer call backs 3146 * t3_stop_sge_timers - stop SGE timer call backs
3061 * @adap: the adapter 3147 * @adap: the adapter
3062 * 3148 *
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index ff262a04ded0..31ed31a3428b 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -493,20 +493,20 @@ int t3_phy_lasi_intr_handler(struct cphy *phy)
493} 493}
494 494
495static const struct adapter_info t3_adap_info[] = { 495static const struct adapter_info t3_adap_info[] = {
496 {2, 0, 496 {1, 1, 0,
497 F_GPIO2_OEN | F_GPIO4_OEN | 497 F_GPIO2_OEN | F_GPIO4_OEN |
498 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0, 498 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
499 &mi1_mdio_ops, "Chelsio PE9000"}, 499 &mi1_mdio_ops, "Chelsio PE9000"},
500 {2, 0, 500 {1, 1, 0,
501 F_GPIO2_OEN | F_GPIO4_OEN | 501 F_GPIO2_OEN | F_GPIO4_OEN |
502 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0, 502 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
503 &mi1_mdio_ops, "Chelsio T302"}, 503 &mi1_mdio_ops, "Chelsio T302"},
504 {1, 0, 504 {1, 0, 0,
505 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN | 505 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
506 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 506 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
507 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, 507 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
508 &mi1_mdio_ext_ops, "Chelsio T310"}, 508 &mi1_mdio_ext_ops, "Chelsio T310"},
509 {2, 0, 509 {1, 1, 0,
510 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN | 510 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
511 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL | 511 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
512 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 512 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
@@ -514,7 +514,7 @@ static const struct adapter_info t3_adap_info[] = {
514 &mi1_mdio_ext_ops, "Chelsio T320"}, 514 &mi1_mdio_ext_ops, "Chelsio T320"},
515 {}, 515 {},
516 {}, 516 {},
517 {1, 0, 517 {1, 0, 0,
518 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN | 518 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
519 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 519 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
520 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, 520 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
@@ -2128,16 +2128,40 @@ void t3_port_intr_clear(struct adapter *adapter, int idx)
2128static int t3_sge_write_context(struct adapter *adapter, unsigned int id, 2128static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
2129 unsigned int type) 2129 unsigned int type)
2130{ 2130{
2131 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff); 2131 if (type == F_RESPONSEQ) {
2132 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff); 2132 /*
2133 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff); 2133 * Can't write the Response Queue Context bits for
2134 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff); 2134 * Interrupt Armed or the Reserve bits after the chip
2135 * has been initialized out of reset. Writing to these
2136 * bits can confuse the hardware.
2137 */
2138 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2139 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2140 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2141 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2142 } else {
2143 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2144 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2145 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2146 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2147 }
2135 t3_write_reg(adapter, A_SG_CONTEXT_CMD, 2148 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2136 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id)); 2149 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2137 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 2150 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2138 0, SG_CONTEXT_CMD_ATTEMPTS, 1); 2151 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2139} 2152}
2140 2153
2154/**
2155 * clear_sge_ctxt - completely clear an SGE context
2156 * @adapter: the adapter
2157 * @id: the context id
2158 * @type: the context type
2159 *
2160 * Completely clear an SGE context. Used predominantly at post-reset
2161 * initialization. Note in particular that we don't skip writing to any
2162 * "sensitive bits" in the contexts the way that t3_sge_write_context()
2163 * does ...
2164 */
2141static int clear_sge_ctxt(struct adapter *adap, unsigned int id, 2165static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2142 unsigned int type) 2166 unsigned int type)
2143{ 2167{
@@ -2145,7 +2169,14 @@ static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2145 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0); 2169 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2146 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0); 2170 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2147 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0); 2171 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2148 return t3_sge_write_context(adap, id, type); 2172 t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2173 t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2174 t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2175 t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2176 t3_write_reg(adap, A_SG_CONTEXT_CMD,
2177 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2178 return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2179 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2149} 2180}
2150 2181
2151/** 2182/**
@@ -2729,10 +2760,10 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
2729 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64)); 2760 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2730 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) | 2761 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2731 F_MTUENABLE | V_WINDOWSCALEMODE(1) | 2762 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2732 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1)); 2763 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2733 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) | 2764 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2734 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) | 2765 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2735 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) | 2766 V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2736 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1)); 2767 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2737 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO, 2768 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2738 F_IPV6ENABLE | F_NICMODE); 2769 F_IPV6ENABLE | F_NICMODE);
@@ -3196,20 +3227,22 @@ int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3196} 3227}
3197 3228
3198/* 3229/*
3199 * Perform the bits of HW initialization that are dependent on the number 3230 * Perform the bits of HW initialization that are dependent on the Tx
3200 * of available ports. 3231 * channels being used.
3201 */ 3232 */
3202static void init_hw_for_avail_ports(struct adapter *adap, int nports) 3233static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
3203{ 3234{
3204 int i; 3235 int i;
3205 3236
3206 if (nports == 1) { 3237 if (chan_map != 3) { /* one channel */
3207 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0); 3238 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3208 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0); 3239 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3209 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN | 3240 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3210 F_PORT0ACTIVE | F_ENFORCEPKT); 3241 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3211 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff); 3242 F_TPTXPORT1EN | F_PORT1ACTIVE));
3212 } else { 3243 t3_write_reg(adap, A_PM1_TX_CFG,
3244 chan_map == 1 ? 0xffffffff : 0);
3245 } else { /* two channels */
3213 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN); 3246 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3214 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB); 3247 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3215 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT, 3248 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
@@ -3517,7 +3550,7 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
3517 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff); 3550 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3518 t3_write_reg(adapter, A_PM1_RX_MODE, 0); 3551 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3519 t3_write_reg(adapter, A_PM1_TX_MODE, 0); 3552 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3520 init_hw_for_avail_ports(adapter, adapter->params.nports); 3553 chan_init_hw(adapter, adapter->params.chan_map);
3521 t3_sge_init(adapter, &adapter->params.sge); 3554 t3_sge_init(adapter, &adapter->params.sge);
3522 3555
3523 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter)); 3556 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
@@ -3754,7 +3787,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3754 get_pci_mode(adapter, &adapter->params.pci); 3787 get_pci_mode(adapter, &adapter->params.pci);
3755 3788
3756 adapter->params.info = ai; 3789 adapter->params.info = ai;
3757 adapter->params.nports = ai->nports; 3790 adapter->params.nports = ai->nports0 + ai->nports1;
3791 adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
3758 adapter->params.rev = t3_read_reg(adapter, A_PL_REV); 3792 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3759 /* 3793 /*
3760 * We used to only run the "adapter check task" once a second if 3794 * We used to only run the "adapter check task" once a second if
@@ -3785,7 +3819,7 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3785 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX"); 3819 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3786 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM"); 3820 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3787 3821
3788 p->nchan = ai->nports; 3822 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3789 p->pmrx_size = t3_mc7_size(&adapter->pmrx); 3823 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3790 p->pmtx_size = t3_mc7_size(&adapter->pmtx); 3824 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3791 p->cm_size = t3_mc7_size(&adapter->cm); 3825 p->cm_size = t3_mc7_size(&adapter->cm);
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 55625dbbae5a..357f565851ed 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -566,6 +566,18 @@ MODULE_LICENSE("GPL");
566 outw(CSR0, DEPCA_ADDR);\ 566 outw(CSR0, DEPCA_ADDR);\
567 outw(STOP, DEPCA_DATA) 567 outw(STOP, DEPCA_DATA)
568 568
569static const struct net_device_ops depca_netdev_ops = {
570 .ndo_open = depca_open,
571 .ndo_start_xmit = depca_start_xmit,
572 .ndo_stop = depca_close,
573 .ndo_set_multicast_list = set_multicast_list,
574 .ndo_do_ioctl = depca_ioctl,
575 .ndo_tx_timeout = depca_tx_timeout,
576 .ndo_change_mtu = eth_change_mtu,
577 .ndo_set_mac_address = eth_mac_addr,
578 .ndo_validate_addr = eth_validate_addr,
579};
580
569static int __init depca_hw_init (struct net_device *dev, struct device *device) 581static int __init depca_hw_init (struct net_device *dev, struct device *device)
570{ 582{
571 struct depca_private *lp; 583 struct depca_private *lp;
@@ -793,12 +805,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
793 } 805 }
794 806
795 /* The DEPCA-specific entries in the device structure. */ 807 /* The DEPCA-specific entries in the device structure. */
796 dev->open = &depca_open; 808 dev->netdev_ops = &depca_netdev_ops;
797 dev->hard_start_xmit = &depca_start_xmit;
798 dev->stop = &depca_close;
799 dev->set_multicast_list = &set_multicast_list;
800 dev->do_ioctl = &depca_ioctl;
801 dev->tx_timeout = depca_tx_timeout;
802 dev->watchdog_timeo = TX_TIMEOUT; 809 dev->watchdog_timeo = TX_TIMEOUT;
803 810
804 dev->mem_start = 0; 811 dev->mem_start = 0;
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index e187c88ae145..cc2ab6412c73 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -739,6 +739,17 @@ static void __init eepro_print_info (struct net_device *dev)
739 739
740static const struct ethtool_ops eepro_ethtool_ops; 740static const struct ethtool_ops eepro_ethtool_ops;
741 741
742static const struct net_device_ops eepro_netdev_ops = {
743 .ndo_open = eepro_open,
744 .ndo_stop = eepro_close,
745 .ndo_start_xmit = eepro_send_packet,
746 .ndo_set_multicast_list = set_multicast_list,
747 .ndo_tx_timeout = eepro_tx_timeout,
748 .ndo_change_mtu = eth_change_mtu,
749 .ndo_set_mac_address = eth_mac_addr,
750 .ndo_validate_addr = eth_validate_addr,
751};
752
742/* This is the real probe routine. Linux has a history of friendly device 753/* This is the real probe routine. Linux has a history of friendly device
743 probes on the ISA bus. A good device probe avoids doing writes, and 754 probes on the ISA bus. A good device probe avoids doing writes, and
744 verifies that the correct device exists and functions. */ 755 verifies that the correct device exists and functions. */
@@ -851,11 +862,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
851 } 862 }
852 } 863 }
853 864
854 dev->open = eepro_open; 865 dev->netdev_ops = &eepro_netdev_ops;
855 dev->stop = eepro_close;
856 dev->hard_start_xmit = eepro_send_packet;
857 dev->set_multicast_list = &set_multicast_list;
858 dev->tx_timeout = eepro_tx_timeout;
859 dev->watchdog_timeo = TX_TIMEOUT; 866 dev->watchdog_timeo = TX_TIMEOUT;
860 dev->ethtool_ops = &eepro_ethtool_ops; 867 dev->ethtool_ops = &eepro_ethtool_ops;
861 868
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 9ff3f2f5e382..1686dca28748 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1043,6 +1043,17 @@ static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
1043 lp->last_tx = jiffies; 1043 lp->last_tx = jiffies;
1044} 1044}
1045 1045
1046static const struct net_device_ops eexp_netdev_ops = {
1047 .ndo_open = eexp_open,
1048 .ndo_stop = eexp_close,
1049 .ndo_start_xmit = eexp_xmit,
1050 .ndo_set_multicast_list = eexp_set_multicast,
1051 .ndo_tx_timeout = eexp_timeout,
1052 .ndo_change_mtu = eth_change_mtu,
1053 .ndo_set_mac_address = eth_mac_addr,
1054 .ndo_validate_addr = eth_validate_addr,
1055};
1056
1046/* 1057/*
1047 * Sanity check the suspected EtherExpress card 1058 * Sanity check the suspected EtherExpress card
1048 * Read hardware address, reset card, size memory and initialize buffer 1059 * Read hardware address, reset card, size memory and initialize buffer
@@ -1163,11 +1174,7 @@ static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr)
1163 lp->rx_buf_start = TX_BUF_START + (lp->num_tx_bufs*TX_BUF_SIZE); 1174 lp->rx_buf_start = TX_BUF_START + (lp->num_tx_bufs*TX_BUF_SIZE);
1164 lp->width = buswidth; 1175 lp->width = buswidth;
1165 1176
1166 dev->open = eexp_open; 1177 dev->netdev_ops = &eexp_netdev_ops;
1167 dev->stop = eexp_close;
1168 dev->hard_start_xmit = eexp_xmit;
1169 dev->set_multicast_list = &eexp_set_multicast;
1170 dev->tx_timeout = eexp_timeout;
1171 dev->watchdog_timeo = 2*HZ; 1178 dev->watchdog_timeo = 2*HZ;
1172 1179
1173 return register_netdev(dev); 1180 return register_netdev(dev);
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 5c048f2fd74f..0d8b6da046f2 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -475,6 +475,17 @@ out:
475} 475}
476#endif 476#endif
477 477
478static const struct net_device_ops eth16i_netdev_ops = {
479 .ndo_open = eth16i_open,
480 .ndo_stop = eth16i_close,
481 .ndo_start_xmit = eth16i_tx,
482 .ndo_set_multicast_list = eth16i_multicast,
483 .ndo_tx_timeout = eth16i_timeout,
484 .ndo_change_mtu = eth_change_mtu,
485 .ndo_set_mac_address = eth_mac_addr,
486 .ndo_validate_addr = eth_validate_addr,
487};
488
478static int __init eth16i_probe1(struct net_device *dev, int ioaddr) 489static int __init eth16i_probe1(struct net_device *dev, int ioaddr)
479{ 490{
480 struct eth16i_local *lp = netdev_priv(dev); 491 struct eth16i_local *lp = netdev_priv(dev);
@@ -549,12 +560,7 @@ static int __init eth16i_probe1(struct net_device *dev, int ioaddr)
549 BITCLR(ioaddr + CONFIG_REG_1, POWERUP); 560 BITCLR(ioaddr + CONFIG_REG_1, POWERUP);
550 561
551 /* Initialize the device structure */ 562 /* Initialize the device structure */
552 memset(lp, 0, sizeof(struct eth16i_local)); 563 dev->netdev_ops = &eth16i_netdev_ops;
553 dev->open = eth16i_open;
554 dev->stop = eth16i_close;
555 dev->hard_start_xmit = eth16i_tx;
556 dev->set_multicast_list = eth16i_multicast;
557 dev->tx_timeout = eth16i_timeout;
558 dev->watchdog_timeo = TX_TIMEOUT; 564 dev->watchdog_timeo = TX_TIMEOUT;
559 spin_lock_init(&lp->lock); 565 spin_lock_init(&lp->lock);
560 566
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
new file mode 100644
index 000000000000..91a9b1a33764
--- /dev/null
+++ b/drivers/net/ethoc.c
@@ -0,0 +1,1112 @@
1/*
2 * linux/drivers/net/ethoc.c
3 *
4 * Copyright (C) 2007-2008 Avionic Design Development GmbH
5 * Copyright (C) 2008-2009 Avionic Design GmbH
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Written by Thierry Reding <thierry.reding@avionic-design.de>
12 */
13
14#include <linux/etherdevice.h>
15#include <linux/crc32.h>
16#include <linux/io.h>
17#include <linux/mii.h>
18#include <linux/phy.h>
19#include <linux/platform_device.h>
20#include <net/ethoc.h>
21
22/* register offsets */
23#define MODER 0x00
24#define INT_SOURCE 0x04
25#define INT_MASK 0x08
26#define IPGT 0x0c
27#define IPGR1 0x10
28#define IPGR2 0x14
29#define PACKETLEN 0x18
30#define COLLCONF 0x1c
31#define TX_BD_NUM 0x20
32#define CTRLMODER 0x24
33#define MIIMODER 0x28
34#define MIICOMMAND 0x2c
35#define MIIADDRESS 0x30
36#define MIITX_DATA 0x34
37#define MIIRX_DATA 0x38
38#define MIISTATUS 0x3c
39#define MAC_ADDR0 0x40
40#define MAC_ADDR1 0x44
41#define ETH_HASH0 0x48
42#define ETH_HASH1 0x4c
43#define ETH_TXCTRL 0x50
44
45/* mode register */
46#define MODER_RXEN (1 << 0) /* receive enable */
47#define MODER_TXEN (1 << 1) /* transmit enable */
48#define MODER_NOPRE (1 << 2) /* no preamble */
49#define MODER_BRO (1 << 3) /* broadcast address */
50#define MODER_IAM (1 << 4) /* individual address mode */
51#define MODER_PRO (1 << 5) /* promiscuous mode */
52#define MODER_IFG (1 << 6) /* interframe gap for incoming frames */
53#define MODER_LOOP (1 << 7) /* loopback */
54#define MODER_NBO (1 << 8) /* no back-off */
55#define MODER_EDE (1 << 9) /* excess defer enable */
56#define MODER_FULLD (1 << 10) /* full duplex */
57#define MODER_RESET (1 << 11) /* FIXME: reset (undocumented) */
58#define MODER_DCRC (1 << 12) /* delayed CRC enable */
59#define MODER_CRC (1 << 13) /* CRC enable */
60#define MODER_HUGE (1 << 14) /* huge packets enable */
61#define MODER_PAD (1 << 15) /* padding enabled */
62#define MODER_RSM (1 << 16) /* receive small packets */
63
64/* interrupt source and mask registers */
65#define INT_MASK_TXF (1 << 0) /* transmit frame */
66#define INT_MASK_TXE (1 << 1) /* transmit error */
67#define INT_MASK_RXF (1 << 2) /* receive frame */
68#define INT_MASK_RXE (1 << 3) /* receive error */
69#define INT_MASK_BUSY (1 << 4)
70#define INT_MASK_TXC (1 << 5) /* transmit control frame */
71#define INT_MASK_RXC (1 << 6) /* receive control frame */
72
73#define INT_MASK_TX (INT_MASK_TXF | INT_MASK_TXE)
74#define INT_MASK_RX (INT_MASK_RXF | INT_MASK_RXE)
75
76#define INT_MASK_ALL ( \
77 INT_MASK_TXF | INT_MASK_TXE | \
78 INT_MASK_RXF | INT_MASK_RXE | \
79 INT_MASK_TXC | INT_MASK_RXC | \
80 INT_MASK_BUSY \
81 )
82
83/* packet length register */
84#define PACKETLEN_MIN(min) (((min) & 0xffff) << 16)
85#define PACKETLEN_MAX(max) (((max) & 0xffff) << 0)
86#define PACKETLEN_MIN_MAX(min, max) (PACKETLEN_MIN(min) | \
87 PACKETLEN_MAX(max))
88
89/* transmit buffer number register */
90#define TX_BD_NUM_VAL(x) (((x) <= 0x80) ? (x) : 0x80)
91
92/* control module mode register */
93#define CTRLMODER_PASSALL (1 << 0) /* pass all receive frames */
94#define CTRLMODER_RXFLOW (1 << 1) /* receive control flow */
95#define CTRLMODER_TXFLOW (1 << 2) /* transmit control flow */
96
97/* MII mode register */
98#define MIIMODER_CLKDIV(x) ((x) & 0xfe) /* needs to be an even number */
99#define MIIMODER_NOPRE (1 << 8) /* no preamble */
100
101/* MII command register */
102#define MIICOMMAND_SCAN (1 << 0) /* scan status */
103#define MIICOMMAND_READ (1 << 1) /* read status */
104#define MIICOMMAND_WRITE (1 << 2) /* write control data */
105
106/* MII address register */
107#define MIIADDRESS_FIAD(x) (((x) & 0x1f) << 0)
108#define MIIADDRESS_RGAD(x) (((x) & 0x1f) << 8)
109#define MIIADDRESS_ADDR(phy, reg) (MIIADDRESS_FIAD(phy) | \
110 MIIADDRESS_RGAD(reg))
111
112/* MII transmit data register */
113#define MIITX_DATA_VAL(x) ((x) & 0xffff)
114
115/* MII receive data register */
116#define MIIRX_DATA_VAL(x) ((x) & 0xffff)
117
118/* MII status register */
119#define MIISTATUS_LINKFAIL (1 << 0)
120#define MIISTATUS_BUSY (1 << 1)
121#define MIISTATUS_INVALID (1 << 2)
122
123/* TX buffer descriptor */
124#define TX_BD_CS (1 << 0) /* carrier sense lost */
125#define TX_BD_DF (1 << 1) /* defer indication */
126#define TX_BD_LC (1 << 2) /* late collision */
127#define TX_BD_RL (1 << 3) /* retransmission limit */
128#define TX_BD_RETRY_MASK (0x00f0)
129#define TX_BD_RETRY(x) (((x) & 0x00f0) >> 4)
130#define TX_BD_UR (1 << 8) /* transmitter underrun */
131#define TX_BD_CRC (1 << 11) /* TX CRC enable */
132#define TX_BD_PAD (1 << 12) /* pad enable for short packets */
133#define TX_BD_WRAP (1 << 13)
134#define TX_BD_IRQ (1 << 14) /* interrupt request enable */
135#define TX_BD_READY (1 << 15) /* TX buffer ready */
136#define TX_BD_LEN(x) (((x) & 0xffff) << 16)
137#define TX_BD_LEN_MASK (0xffff << 16)
138
139#define TX_BD_STATS (TX_BD_CS | TX_BD_DF | TX_BD_LC | \
140 TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)
141
142/* RX buffer descriptor */
143#define RX_BD_LC (1 << 0) /* late collision */
144#define RX_BD_CRC (1 << 1) /* RX CRC error */
145#define RX_BD_SF (1 << 2) /* short frame */
146#define RX_BD_TL (1 << 3) /* too long */
147#define RX_BD_DN (1 << 4) /* dribble nibble */
148#define RX_BD_IS (1 << 5) /* invalid symbol */
149#define RX_BD_OR (1 << 6) /* receiver overrun */
150#define RX_BD_MISS (1 << 7)
151#define RX_BD_CF (1 << 8) /* control frame */
152#define RX_BD_WRAP (1 << 13)
153#define RX_BD_IRQ (1 << 14) /* interrupt request enable */
154#define RX_BD_EMPTY (1 << 15)
155#define RX_BD_LEN(x) (((x) & 0xffff) << 16)
156
157#define RX_BD_STATS (RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
158 RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)
159
160#define ETHOC_BUFSIZ 1536
161#define ETHOC_ZLEN 64
162#define ETHOC_BD_BASE 0x400
163#define ETHOC_TIMEOUT (HZ / 2)
164#define ETHOC_MII_TIMEOUT (1 + (HZ / 5))
165
166/**
167 * struct ethoc - driver-private device structure
168 * @iobase: pointer to I/O memory region
169 * @membase: pointer to buffer memory region
170 * @num_tx: number of send buffers
171 * @cur_tx: last send buffer written
172 * @dty_tx: last buffer actually sent
173 * @num_rx: number of receive buffers
174 * @cur_rx: current receive buffer
175 * @netdev: pointer to network device structure
176 * @napi: NAPI structure
177 * @stats: network device statistics
178 * @msg_enable: device state flags
179 * @rx_lock: receive lock
180 * @lock: device lock
181 * @phy: attached PHY
182 * @mdio: MDIO bus for PHY access
183 * @phy_id: address of attached PHY
184 */
185struct ethoc {
186 void __iomem *iobase;
187 void __iomem *membase;
188
189 unsigned int num_tx;
190 unsigned int cur_tx;
191 unsigned int dty_tx;
192
193 unsigned int num_rx;
194 unsigned int cur_rx;
195
196 struct net_device *netdev;
197 struct napi_struct napi;
198 struct net_device_stats stats;
199 u32 msg_enable;
200
201 spinlock_t rx_lock;
202 spinlock_t lock;
203
204 struct phy_device *phy;
205 struct mii_bus *mdio;
206 s8 phy_id;
207};
208
209/**
210 * struct ethoc_bd - buffer descriptor
211 * @stat: buffer statistics
212 * @addr: physical memory address
213 */
214struct ethoc_bd {
215 u32 stat;
216 u32 addr;
217};
218
219static u32 ethoc_read(struct ethoc *dev, loff_t offset)
220{
221 return ioread32(dev->iobase + offset);
222}
223
224static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
225{
226 iowrite32(data, dev->iobase + offset);
227}
228
229static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd)
230{
231 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
232 bd->stat = ethoc_read(dev, offset + 0);
233 bd->addr = ethoc_read(dev, offset + 4);
234}
235
236static void ethoc_write_bd(struct ethoc *dev, int index,
237 const struct ethoc_bd *bd)
238{
239 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
240 ethoc_write(dev, offset + 0, bd->stat);
241 ethoc_write(dev, offset + 4, bd->addr);
242}
243
244static void ethoc_enable_irq(struct ethoc *dev, u32 mask)
245{
246 u32 imask = ethoc_read(dev, INT_MASK);
247 imask |= mask;
248 ethoc_write(dev, INT_MASK, imask);
249}
250
251static void ethoc_disable_irq(struct ethoc *dev, u32 mask)
252{
253 u32 imask = ethoc_read(dev, INT_MASK);
254 imask &= ~mask;
255 ethoc_write(dev, INT_MASK, imask);
256}
257
/* Acknowledge (clear) the interrupt sources given in @mask. */
static void ethoc_ack_irq(struct ethoc *dev, u32 mask)
{
	ethoc_write(dev, INT_SOURCE, mask);
}
262
263static void ethoc_enable_rx_and_tx(struct ethoc *dev)
264{
265 u32 mode = ethoc_read(dev, MODER);
266 mode |= MODER_RXEN | MODER_TXEN;
267 ethoc_write(dev, MODER, mode);
268}
269
270static void ethoc_disable_rx_and_tx(struct ethoc *dev)
271{
272 u32 mode = ethoc_read(dev, MODER);
273 mode &= ~(MODER_RXEN | MODER_TXEN);
274 ethoc_write(dev, MODER, mode);
275}
276
/*
 * Lay out the TX and RX descriptor rings in the controller's BD area:
 * descriptors 0..num_tx-1 are transmit, the following num_rx are
 * receive.  Data buffers live back-to-back in the external buffer
 * memory, ETHOC_BUFSIZ apart, starting at offset 0.  Always returns 0.
 */
static int ethoc_init_ring(struct ethoc *dev)
{
	struct ethoc_bd bd;
	int i;

	dev->cur_tx = 0;
	dev->dty_tx = 0;
	dev->cur_rx = 0;

	/* setup transmission buffers */
	bd.addr = 0;
	bd.stat = TX_BD_IRQ | TX_BD_CRC;

	for (i = 0; i < dev->num_tx; i++) {
		if (i == dev->num_tx - 1)
			bd.stat |= TX_BD_WRAP;	/* last descriptor wraps */

		ethoc_write_bd(dev, i, &bd);
		bd.addr += ETHOC_BUFSIZ;
	}

	/* RX buffers follow the TX buffers in buffer memory */
	bd.addr = dev->num_tx * ETHOC_BUFSIZ;
	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

	for (i = 0; i < dev->num_rx; i++) {
		if (i == dev->num_rx - 1)
			bd.stat |= RX_BD_WRAP;	/* last descriptor wraps */

		ethoc_write_bd(dev, dev->num_tx + i, &bd);
		bd.addr += ETHOC_BUFSIZ;
	}

	return 0;
}
311
/*
 * Bring the MAC into a known operating state: RX/TX are disabled while
 * registers are programmed (CRC generation, automatic padding, full
 * duplex), then all interrupts are acknowledged and unmasked and RX/TX
 * re-enabled.  Always returns 0.
 */
static int ethoc_reset(struct ethoc *dev)
{
	u32 mode;

	/* TODO: reset controller? */

	ethoc_disable_rx_and_tx(dev);

	/* TODO: setup registers */

	/* enable FCS generation and automatic padding */
	mode = ethoc_read(dev, MODER);
	mode |= MODER_CRC | MODER_PAD;
	ethoc_write(dev, MODER, mode);

	/* set full-duplex mode */
	mode = ethoc_read(dev, MODER);
	mode |= MODER_FULLD;
	ethoc_write(dev, MODER, mode);
	ethoc_write(dev, IPGT, 0x15);	/* inter-packet gap for full duplex */

	ethoc_ack_irq(dev, INT_MASK_ALL);
	ethoc_enable_irq(dev, INT_MASK_ALL);
	ethoc_enable_rx_and_tx(dev);
	return 0;
}
338
/*
 * Inspect the status bits of a received buffer descriptor and update
 * the RX error counters.  Returns the number of fatal error conditions
 * seen; a non-zero return tells the caller to drop the frame.  The
 * dribble-nibble and missed-frame conditions are counted but are not
 * treated as fatal (ret is not incremented for them).
 */
static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
		struct ethoc_bd *bd)
{
	struct net_device *netdev = dev->netdev;
	unsigned int ret = 0;

	if (bd->stat & RX_BD_TL) {
		dev_err(&netdev->dev, "RX: frame too long\n");
		dev->stats.rx_length_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_SF) {
		dev_err(&netdev->dev, "RX: frame too short\n");
		dev->stats.rx_length_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_DN) {
		dev_err(&netdev->dev, "RX: dribble nibble\n");
		dev->stats.rx_frame_errors++;
	}

	if (bd->stat & RX_BD_CRC) {
		dev_err(&netdev->dev, "RX: wrong CRC\n");
		dev->stats.rx_crc_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_OR) {
		dev_err(&netdev->dev, "RX: overrun\n");
		dev->stats.rx_over_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_MISS)
		dev->stats.rx_missed_errors++;

	if (bd->stat & RX_BD_LC) {
		dev_err(&netdev->dev, "RX: late collision\n");
		dev->stats.collisions++;
		ret++;
	}

	return ret;
}
385
/*
 * Receive up to @limit frames from the RX descriptor ring (called from
 * the NAPI poll routine).  Good frames are copied out of the
 * controller's buffer memory into freshly allocated skbs and handed to
 * the stack; each processed descriptor is then marked empty for reuse.
 * Returns the number of descriptors processed.
 */
static int ethoc_rx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		/* RX descriptors start right after the TX descriptors */
		entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY)
			break;	/* hardware still owns this descriptor */

		if (ethoc_update_rx_stats(priv, &bd) == 0) {
			int size = bd.stat >> 16;	/* RX_BD_LEN field */
			struct sk_buff *skb = netdev_alloc_skb(dev, size);
			if (likely(skb)) {
				void *src = priv->membase + bd.addr;
				memcpy_fromio(skb_put(skb, size), src, size);
				skb->protocol = eth_type_trans(skb, dev);
				dev->last_rx = jiffies;
				priv->stats.rx_packets++;
				priv->stats.rx_bytes += size;
				netif_receive_skb(skb);
			} else {
				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - "
							"packet dropped\n");

				priv->stats.rx_dropped++;
				break;
			}
		}

		/* clear the buffer descriptor so it can be reused */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |= RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
		priv->cur_rx++;
	}

	return count;
}
430
431static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
432{
433 struct net_device *netdev = dev->netdev;
434
435 if (bd->stat & TX_BD_LC) {
436 dev_err(&netdev->dev, "TX: late collision\n");
437 dev->stats.tx_window_errors++;
438 }
439
440 if (bd->stat & TX_BD_RL) {
441 dev_err(&netdev->dev, "TX: retransmit limit\n");
442 dev->stats.tx_aborted_errors++;
443 }
444
445 if (bd->stat & TX_BD_UR) {
446 dev_err(&netdev->dev, "TX: underrun\n");
447 dev->stats.tx_fifo_errors++;
448 }
449
450 if (bd->stat & TX_BD_CS) {
451 dev_err(&netdev->dev, "TX: carrier sense lost\n");
452 dev->stats.tx_carrier_errors++;
453 }
454
455 if (bd->stat & TX_BD_STATS)
456 dev->stats.tx_errors++;
457
458 dev->stats.collisions += (bd->stat >> 4) & 0xf;
459 dev->stats.tx_bytes += bd->stat >> 16;
460 dev->stats.tx_packets++;
461 return 0;
462}
463
/*
 * Reclaim completed TX descriptors (runs in interrupt context).  Walks
 * from the last reclaimed descriptor (dty_tx) towards cur_tx,
 * accumulating statistics for each finished buffer, and restarts the
 * queue once at least half the ring is free.
 */
static void ethoc_tx(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);

	spin_lock(&priv->lock);

	while (priv->dty_tx != priv->cur_tx) {
		unsigned int entry = priv->dty_tx % priv->num_tx;
		struct ethoc_bd bd;

		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & TX_BD_READY)
			break;	/* hardware has not sent this one yet */

		entry = (++priv->dty_tx) % priv->num_tx;
		(void)ethoc_update_tx_stats(priv, &bd);
	}

	if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
		netif_wake_queue(dev);

	ethoc_ack_irq(priv, INT_MASK_TX);
	spin_unlock(&priv->lock);
}
488
/*
 * Top-half interrupt handler.  All sources are masked while pending
 * interrupts are handled; RX work is deferred to NAPI (RX stays masked
 * until the poll routine drains the ring), TX completion is handled
 * inline.
 */
static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct ethoc *priv = netdev_priv(dev);
	u32 pending;

	ethoc_disable_irq(priv, INT_MASK_ALL);
	pending = ethoc_read(priv, INT_SOURCE);
	if (unlikely(pending == 0)) {
		/* not ours - the IRQ line is shared */
		ethoc_enable_irq(priv, INT_MASK_ALL);
		return IRQ_NONE;
	}

	ethoc_ack_irq(priv, INT_MASK_ALL);

	if (pending & INT_MASK_BUSY) {
		dev_err(&dev->dev, "packet dropped\n");
		priv->stats.rx_dropped++;
	}

	if (pending & INT_MASK_RX) {
		if (napi_schedule_prep(&priv->napi))
			__napi_schedule(&priv->napi);
	} else {
		/* no RX work pending: safe to unmask RX again */
		ethoc_enable_irq(priv, INT_MASK_RX);
	}

	if (pending & INT_MASK_TX)
		ethoc_tx(dev);

	/* re-enable everything except RX, which NAPI owns for now */
	ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX);
	return IRQ_HANDLED;
}
522
523static int ethoc_get_mac_address(struct net_device *dev, void *addr)
524{
525 struct ethoc *priv = netdev_priv(dev);
526 u8 *mac = (u8 *)addr;
527 u32 reg;
528
529 reg = ethoc_read(priv, MAC_ADDR0);
530 mac[2] = (reg >> 24) & 0xff;
531 mac[3] = (reg >> 16) & 0xff;
532 mac[4] = (reg >> 8) & 0xff;
533 mac[5] = (reg >> 0) & 0xff;
534
535 reg = ethoc_read(priv, MAC_ADDR1);
536 mac[0] = (reg >> 8) & 0xff;
537 mac[1] = (reg >> 0) & 0xff;
538
539 return 0;
540}
541
/*
 * NAPI poll callback: process up to @budget received frames.  When the
 * ring is drained before the budget is exhausted, re-enable RX
 * interrupts and leave polling mode.  Returns the work done.
 */
static int ethoc_poll(struct napi_struct *napi, int budget)
{
	struct ethoc *priv = container_of(napi, struct ethoc, napi);
	int work_done = 0;

	work_done = ethoc_rx(priv->netdev, budget);
	if (work_done < budget) {
		ethoc_enable_irq(priv, INT_MASK_RX);
		napi_complete(napi);
	}

	return work_done;
}
555
/*
 * Read PHY register @reg of PHY @phy over the MII management
 * interface.  Polls the busy flag (yielding via schedule()) for up to
 * ETHOC_MII_TIMEOUT jiffies.  Returns the 16-bit register value, or
 * -EBUSY on timeout.
 */
static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
{
	unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
	struct ethoc *priv = bus->priv;

	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
	ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);

	while (time_before(jiffies, timeout)) {
		u32 status = ethoc_read(priv, MIISTATUS);
		if (!(status & MIISTATUS_BUSY)) {
			u32 data = ethoc_read(priv, MIIRX_DATA);
			/* reset MII command register */
			ethoc_write(priv, MIICOMMAND, 0);
			return data;
		}

		schedule();	/* yield the CPU while the transfer runs */
	}

	return -EBUSY;
}
578
/*
 * Write @val to PHY register @reg of PHY @phy over the MII management
 * interface.  Polls the busy flag (yielding via schedule()) for up to
 * ETHOC_MII_TIMEOUT jiffies.  Returns 0 on success or -EBUSY on
 * timeout.
 */
static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
	unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
	struct ethoc *priv = bus->priv;

	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
	ethoc_write(priv, MIITX_DATA, val);
	ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);

	while (time_before(jiffies, timeout)) {
		u32 stat = ethoc_read(priv, MIISTATUS);
		if (!(stat & MIISTATUS_BUSY))
			return 0;

		schedule();	/* yield the CPU while the transfer runs */
	}

	return -EBUSY;
}
598
/* Required phylib callback; the bus needs no reset sequence. */
static int ethoc_mdio_reset(struct mii_bus *bus)
{
	return 0;
}
603
/* Link-change callback passed to phy_connect(); nothing to do here. */
static void ethoc_mdio_poll(struct net_device *dev)
{
}
607
608static int ethoc_mdio_probe(struct net_device *dev)
609{
610 struct ethoc *priv = netdev_priv(dev);
611 struct phy_device *phy;
612 int i;
613
614 for (i = 0; i < PHY_MAX_ADDR; i++) {
615 phy = priv->mdio->phy_map[i];
616 if (phy) {
617 if (priv->phy_id != -1) {
618 /* attach to specified PHY */
619 if (priv->phy_id == phy->addr)
620 break;
621 } else {
622 /* autoselect PHY if none was specified */
623 if (phy->addr != 0)
624 break;
625 }
626 }
627 }
628
629 if (!phy) {
630 dev_err(&dev->dev, "no PHY found\n");
631 return -ENXIO;
632 }
633
634 phy = phy_connect(dev, dev_name(&phy->dev), &ethoc_mdio_poll, 0,
635 PHY_INTERFACE_MODE_GMII);
636 if (IS_ERR(phy)) {
637 dev_err(&dev->dev, "could not attach to PHY\n");
638 return PTR_ERR(phy);
639 }
640
641 priv->phy = phy;
642 return 0;
643}
644
/*
 * ndo_open: request the (shared) IRQ, size the TX/RX descriptor rings
 * from the available buffer memory, reset the MAC, and start the
 * queue, the PHY and NAPI.  Returns 0 or the request_irq() error.
 */
static int ethoc_open(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	unsigned int min_tx = 2;
	unsigned int num_bd;
	int ret;

	ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
			dev->name, dev);
	if (ret)
		return ret;

	/* calculate the number of TX/RX buffers */
	num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ;
	/* NOTE(review): min() caps the TX ring at min_tx (2) descriptors;
	 * if the intent was "at least 2 TX buffers, up to a quarter of
	 * the pool", this should be max() - confirm against hardware. */
	priv->num_tx = min(min_tx, num_bd / 4);
	priv->num_rx = num_bd - priv->num_tx;
	ethoc_write(priv, TX_BD_NUM, priv->num_tx);

	ethoc_init_ring(priv);
	ethoc_reset(priv);

	if (netif_queue_stopped(dev)) {
		dev_dbg(&dev->dev, " resuming queue\n");
		netif_wake_queue(dev);
	} else {
		dev_dbg(&dev->dev, " starting queue\n");
		netif_start_queue(dev);
	}

	phy_start(priv->phy);
	napi_enable(&priv->napi);

	if (netif_msg_ifup(priv)) {
		dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
				dev->base_addr, dev->mem_start, dev->mem_end);
	}

	return 0;
}
684
/*
 * ndo_stop: disable NAPI, the PHY and the MAC, release the IRQ and
 * stop the transmit queue.  Mirrors ethoc_open().  Always returns 0.
 */
static int ethoc_stop(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);

	napi_disable(&priv->napi);

	if (priv->phy)
		phy_stop(priv->phy);

	ethoc_disable_rx_and_tx(priv);
	free_irq(dev->irq, dev);

	if (!netif_queue_stopped(dev))
		netif_stop_queue(dev);

	return 0;
}
702
/*
 * ndo_do_ioctl: forward MII ioctls to phylib.  SIOCGMIIPHY operates on
 * the attached PHY; all other MII requests address the PHY named in
 * the request, which must exist on our MDIO bus.
 */
static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ethoc *priv = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);
	struct phy_device *phy = NULL;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd != SIOCGMIIPHY) {
		if (mdio->phy_id >= PHY_MAX_ADDR)
			return -ERANGE;

		phy = priv->mdio->phy_map[mdio->phy_id];
		if (!phy)
			return -ENODEV;
	} else {
		phy = priv->phy;
	}

	return phy_mii_ioctl(phy, mdio, cmd);
}
725
/* ndo_set_config: runtime ifmap reconfiguration is not supported. */
static int ethoc_config(struct net_device *dev, struct ifmap *map)
{
	return -ENOSYS;
}
730
731static int ethoc_set_mac_address(struct net_device *dev, void *addr)
732{
733 struct ethoc *priv = netdev_priv(dev);
734 u8 *mac = (u8 *)addr;
735
736 ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
737 (mac[4] << 8) | (mac[5] << 0));
738 ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0));
739
740 return 0;
741}
742
/*
 * ndo_set_multicast_list: program the loopback, broadcast and
 * promiscuous mode bits from dev->flags, then load the 64-bit
 * multicast hash filter (top six bits of the CRC-32 of each address
 * select the hash bit).
 */
static void ethoc_set_multicast_list(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	u32 mode = ethoc_read(priv, MODER);
	struct dev_mc_list *mc = NULL;
	u32 hash[2] = { 0, 0 };

	/* set loopback mode if requested */
	if (dev->flags & IFF_LOOPBACK)
		mode |= MODER_LOOP;
	else
		mode &= ~MODER_LOOP;

	/* receive broadcast frames if requested */
	if (dev->flags & IFF_BROADCAST)
		mode &= ~MODER_BRO;
	else
		mode |= MODER_BRO;

	/* enable promiscuous mode if requested */
	if (dev->flags & IFF_PROMISC)
		mode |= MODER_PRO;
	else
		mode &= ~MODER_PRO;

	ethoc_write(priv, MODER, mode);

	/* receive multicast frames */
	if (dev->flags & IFF_ALLMULTI) {
		/* accept everything: set all hash bits */
		hash[0] = 0xffffffff;
		hash[1] = 0xffffffff;
	} else {
		for (mc = dev->mc_list; mc; mc = mc->next) {
			u32 crc = ether_crc(mc->dmi_addrlen, mc->dmi_addr);
			int bit = (crc >> 26) & 0x3f;
			hash[bit >> 5] |= 1 << (bit & 0x1f);
		}
	}

	ethoc_write(priv, ETH_HASH0, hash[0]);
	ethoc_write(priv, ETH_HASH1, hash[1]);
}
785
/* ndo_change_mtu: MTU changes are not supported by this driver. */
static int ethoc_change_mtu(struct net_device *dev, int new_mtu)
{
	return -ENOSYS;
}
790
/*
 * ndo_tx_timeout: the watchdog fired.  If an interrupt is still
 * pending, assume it was lost and run the handler by hand to unwedge
 * the TX ring.
 */
static void ethoc_tx_timeout(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	u32 pending = ethoc_read(priv, INT_SOURCE);
	if (likely(pending))
		ethoc_interrupt(dev->irq, dev);
}
798
/* ndo_get_stats: return the driver-maintained statistics block. */
static struct net_device_stats *ethoc_stats(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	return &priv->stats;
}
804
805static int ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
806{
807 struct ethoc *priv = netdev_priv(dev);
808 struct ethoc_bd bd;
809 unsigned int entry;
810 void *dest;
811
812 if (unlikely(skb->len > ETHOC_BUFSIZ)) {
813 priv->stats.tx_errors++;
814 return -EMSGSIZE;
815 }
816
817 entry = priv->cur_tx % priv->num_tx;
818 spin_lock_irq(&priv->lock);
819 priv->cur_tx++;
820
821 ethoc_read_bd(priv, entry, &bd);
822 if (unlikely(skb->len < ETHOC_ZLEN))
823 bd.stat |= TX_BD_PAD;
824 else
825 bd.stat &= ~TX_BD_PAD;
826
827 dest = priv->membase + bd.addr;
828 memcpy_toio(dest, skb->data, skb->len);
829
830 bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
831 bd.stat |= TX_BD_LEN(skb->len);
832 ethoc_write_bd(priv, entry, &bd);
833
834 bd.stat |= TX_BD_READY;
835 ethoc_write_bd(priv, entry, &bd);
836
837 if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) {
838 dev_dbg(&dev->dev, "stopping queue\n");
839 netif_stop_queue(dev);
840 }
841
842 dev->trans_start = jiffies;
843 dev_kfree_skb(skb);
844
845 spin_unlock_irq(&priv->lock);
846 return NETDEV_TX_OK;
847}
848
/* net_device callbacks for the OpenCores MAC */
static const struct net_device_ops ethoc_netdev_ops = {
	.ndo_open = ethoc_open,
	.ndo_stop = ethoc_stop,
	.ndo_do_ioctl = ethoc_ioctl,
	.ndo_set_config = ethoc_config,
	.ndo_set_mac_address = ethoc_set_mac_address,
	.ndo_set_multicast_list = ethoc_set_multicast_list,
	.ndo_change_mtu = ethoc_change_mtu,
	.ndo_tx_timeout = ethoc_tx_timeout,
	.ndo_get_stats = ethoc_stats,
	.ndo_start_xmit = ethoc_start_xmit,
};
861
862/**
863 * ethoc_probe() - initialize OpenCores ethernet MAC
864 * pdev: platform device
865 */
866static int ethoc_probe(struct platform_device *pdev)
867{
868 struct net_device *netdev = NULL;
869 struct resource *res = NULL;
870 struct resource *mmio = NULL;
871 struct resource *mem = NULL;
872 struct ethoc *priv = NULL;
873 unsigned int phy;
874 int ret = 0;
875
876 /* allocate networking device */
877 netdev = alloc_etherdev(sizeof(struct ethoc));
878 if (!netdev) {
879 dev_err(&pdev->dev, "cannot allocate network device\n");
880 ret = -ENOMEM;
881 goto out;
882 }
883
884 SET_NETDEV_DEV(netdev, &pdev->dev);
885 platform_set_drvdata(pdev, netdev);
886
887 /* obtain I/O memory space */
888 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
889 if (!res) {
890 dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
891 ret = -ENXIO;
892 goto free;
893 }
894
895 mmio = devm_request_mem_region(&pdev->dev, res->start,
896 res->end - res->start + 1, res->name);
897 if (!res) {
898 dev_err(&pdev->dev, "cannot request I/O memory space\n");
899 ret = -ENXIO;
900 goto free;
901 }
902
903 netdev->base_addr = mmio->start;
904
905 /* obtain buffer memory space */
906 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
907 if (!res) {
908 dev_err(&pdev->dev, "cannot obtain memory space\n");
909 ret = -ENXIO;
910 goto free;
911 }
912
913 mem = devm_request_mem_region(&pdev->dev, res->start,
914 res->end - res->start + 1, res->name);
915 if (!mem) {
916 dev_err(&pdev->dev, "cannot request memory space\n");
917 ret = -ENXIO;
918 goto free;
919 }
920
921 netdev->mem_start = mem->start;
922 netdev->mem_end = mem->end;
923
924 /* obtain device IRQ number */
925 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
926 if (!res) {
927 dev_err(&pdev->dev, "cannot obtain IRQ\n");
928 ret = -ENXIO;
929 goto free;
930 }
931
932 netdev->irq = res->start;
933
934 /* setup driver-private data */
935 priv = netdev_priv(netdev);
936 priv->netdev = netdev;
937
938 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
939 mmio->end - mmio->start + 1);
940 if (!priv->iobase) {
941 dev_err(&pdev->dev, "cannot remap I/O memory space\n");
942 ret = -ENXIO;
943 goto error;
944 }
945
946 priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start,
947 mem->end - mem->start + 1);
948 if (!priv->membase) {
949 dev_err(&pdev->dev, "cannot remap memory space\n");
950 ret = -ENXIO;
951 goto error;
952 }
953
954 /* Allow the platform setup code to pass in a MAC address. */
955 if (pdev->dev.platform_data) {
956 struct ethoc_platform_data *pdata =
957 (struct ethoc_platform_data *)pdev->dev.platform_data;
958 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
959 priv->phy_id = pdata->phy_id;
960 }
961
962 /* Check that the given MAC address is valid. If it isn't, read the
963 * current MAC from the controller. */
964 if (!is_valid_ether_addr(netdev->dev_addr))
965 ethoc_get_mac_address(netdev, netdev->dev_addr);
966
967 /* Check the MAC again for validity, if it still isn't choose and
968 * program a random one. */
969 if (!is_valid_ether_addr(netdev->dev_addr))
970 random_ether_addr(netdev->dev_addr);
971
972 ethoc_set_mac_address(netdev, netdev->dev_addr);
973
974 /* register MII bus */
975 priv->mdio = mdiobus_alloc();
976 if (!priv->mdio) {
977 ret = -ENOMEM;
978 goto free;
979 }
980
981 priv->mdio->name = "ethoc-mdio";
982 snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d",
983 priv->mdio->name, pdev->id);
984 priv->mdio->read = ethoc_mdio_read;
985 priv->mdio->write = ethoc_mdio_write;
986 priv->mdio->reset = ethoc_mdio_reset;
987 priv->mdio->priv = priv;
988
989 priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
990 if (!priv->mdio->irq) {
991 ret = -ENOMEM;
992 goto free_mdio;
993 }
994
995 for (phy = 0; phy < PHY_MAX_ADDR; phy++)
996 priv->mdio->irq[phy] = PHY_POLL;
997
998 ret = mdiobus_register(priv->mdio);
999 if (ret) {
1000 dev_err(&netdev->dev, "failed to register MDIO bus\n");
1001 goto free_mdio;
1002 }
1003
1004 ret = ethoc_mdio_probe(netdev);
1005 if (ret) {
1006 dev_err(&netdev->dev, "failed to probe MDIO bus\n");
1007 goto error;
1008 }
1009
1010 ether_setup(netdev);
1011
1012 /* setup the net_device structure */
1013 netdev->netdev_ops = &ethoc_netdev_ops;
1014 netdev->watchdog_timeo = ETHOC_TIMEOUT;
1015 netdev->features |= 0;
1016
1017 /* setup NAPI */
1018 memset(&priv->napi, 0, sizeof(priv->napi));
1019 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
1020
1021 spin_lock_init(&priv->rx_lock);
1022 spin_lock_init(&priv->lock);
1023
1024 ret = register_netdev(netdev);
1025 if (ret < 0) {
1026 dev_err(&netdev->dev, "failed to register interface\n");
1027 goto error;
1028 }
1029
1030 goto out;
1031
1032error:
1033 mdiobus_unregister(priv->mdio);
1034free_mdio:
1035 kfree(priv->mdio->irq);
1036 mdiobus_free(priv->mdio);
1037free:
1038 free_netdev(netdev);
1039out:
1040 return ret;
1041}
1042
1043/**
1044 * ethoc_remove() - shutdown OpenCores ethernet MAC
1045 * @pdev: platform device
1046 */
1047static int ethoc_remove(struct platform_device *pdev)
1048{
1049 struct net_device *netdev = platform_get_drvdata(pdev);
1050 struct ethoc *priv = netdev_priv(netdev);
1051
1052 platform_set_drvdata(pdev, NULL);
1053
1054 if (netdev) {
1055 phy_disconnect(priv->phy);
1056 priv->phy = NULL;
1057
1058 if (priv->mdio) {
1059 mdiobus_unregister(priv->mdio);
1060 kfree(priv->mdio->irq);
1061 mdiobus_free(priv->mdio);
1062 }
1063
1064 unregister_netdev(netdev);
1065 free_netdev(netdev);
1066 }
1067
1068 return 0;
1069}
1070
#ifdef CONFIG_PM
/* Power management is not implemented; refuse suspend/resume. */
static int ethoc_suspend(struct platform_device *pdev, pm_message_t state)
{
	return -ENOSYS;
}

static int ethoc_resume(struct platform_device *pdev)
{
	return -ENOSYS;
}
#else
# define ethoc_suspend NULL
# define ethoc_resume NULL
#endif
1085
/* platform driver glue; binds to devices named "ethoc" */
static struct platform_driver ethoc_driver = {
	.probe   = ethoc_probe,
	.remove  = ethoc_remove,
	.suspend = ethoc_suspend,
	.resume  = ethoc_resume,
	.driver  = {
		.name = "ethoc",
	},
};
1095
/* Module entry points: register/unregister the platform driver. */
static int __init ethoc_init(void)
{
	return platform_driver_register(&ethoc_driver);
}

static void __exit ethoc_exit(void)
{
	platform_driver_unregister(&ethoc_driver);
}

module_init(ethoc_init);
module_exit(ethoc_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
MODULE_LICENSE("GPL v2");
1112
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index b852303c9362..1a685a04d4b2 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -388,6 +388,18 @@ static int __init ewrk3_probe1(struct net_device *dev, u_long iobase, int irq)
388 return err; 388 return err;
389} 389}
390 390
391static const struct net_device_ops ewrk3_netdev_ops = {
392 .ndo_open = ewrk3_open,
393 .ndo_start_xmit = ewrk3_queue_pkt,
394 .ndo_stop = ewrk3_close,
395 .ndo_set_multicast_list = set_multicast_list,
396 .ndo_do_ioctl = ewrk3_ioctl,
397 .ndo_tx_timeout = ewrk3_timeout,
398 .ndo_change_mtu = eth_change_mtu,
399 .ndo_set_mac_address = eth_mac_addr,
400 .ndo_validate_addr = eth_validate_addr,
401};
402
391static int __init 403static int __init
392ewrk3_hw_init(struct net_device *dev, u_long iobase) 404ewrk3_hw_init(struct net_device *dev, u_long iobase)
393{ 405{
@@ -603,16 +615,11 @@ ewrk3_hw_init(struct net_device *dev, u_long iobase)
603 printk(version); 615 printk(version);
604 } 616 }
605 /* The EWRK3-specific entries in the device structure. */ 617 /* The EWRK3-specific entries in the device structure. */
606 dev->open = ewrk3_open; 618 dev->netdev_ops = &ewrk3_netdev_ops;
607 dev->hard_start_xmit = ewrk3_queue_pkt;
608 dev->stop = ewrk3_close;
609 dev->set_multicast_list = set_multicast_list;
610 dev->do_ioctl = ewrk3_ioctl;
611 if (lp->adapter_name[4] == '3') 619 if (lp->adapter_name[4] == '3')
612 SET_ETHTOOL_OPS(dev, &ethtool_ops_203); 620 SET_ETHTOOL_OPS(dev, &ethtool_ops_203);
613 else 621 else
614 SET_ETHTOOL_OPS(dev, &ethtool_ops); 622 SET_ETHTOOL_OPS(dev, &ethtool_ops);
615 dev->tx_timeout = ewrk3_timeout;
616 dev->watchdog_timeo = QUEUE_PKT_TIMEOUT; 623 dev->watchdog_timeo = QUEUE_PKT_TIMEOUT;
617 624
618 dev->mem_start = 0; 625 dev->mem_start = 0;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 9d81e7a48dba..6a38800be3f1 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1239,19 +1239,9 @@ static int gfar_enet_open(struct net_device *dev)
1239 return err; 1239 return err;
1240} 1240}
1241 1241
1242static inline struct txfcb *gfar_add_fcb(struct sk_buff **skbp) 1242static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1243{ 1243{
1244 struct txfcb *fcb; 1244 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
1245 struct sk_buff *skb = *skbp;
1246
1247 if (unlikely(skb_headroom(skb) < GMAC_FCB_LEN)) {
1248 struct sk_buff *old_skb = skb;
1249 skb = skb_realloc_headroom(old_skb, GMAC_FCB_LEN);
1250 if (!skb)
1251 return NULL;
1252 dev_kfree_skb_any(old_skb);
1253 }
1254 fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
1255 cacheable_memzero(fcb, GMAC_FCB_LEN); 1245 cacheable_memzero(fcb, GMAC_FCB_LEN);
1256 1246
1257 return fcb; 1247 return fcb;
@@ -1320,6 +1310,22 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1320 1310
1321 base = priv->tx_bd_base; 1311 base = priv->tx_bd_base;
1322 1312
1313 /* make space for additional header when fcb is needed */
1314 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
1315 (priv->vlgrp && vlan_tx_tag_present(skb))) &&
1316 (skb_headroom(skb) < GMAC_FCB_LEN)) {
1317 struct sk_buff *skb_new;
1318
1319 skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
1320 if (!skb_new) {
1321 dev->stats.tx_errors++;
1322 kfree_skb(skb);
1323 return NETDEV_TX_OK;
1324 }
1325 kfree_skb(skb);
1326 skb = skb_new;
1327 }
1328
1323 /* total number of fragments in the SKB */ 1329 /* total number of fragments in the SKB */
1324 nr_frags = skb_shinfo(skb)->nr_frags; 1330 nr_frags = skb_shinfo(skb)->nr_frags;
1325 1331
@@ -1372,20 +1378,18 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1372 1378
1373 /* Set up checksumming */ 1379 /* Set up checksumming */
1374 if (CHECKSUM_PARTIAL == skb->ip_summed) { 1380 if (CHECKSUM_PARTIAL == skb->ip_summed) {
1375 fcb = gfar_add_fcb(&skb); 1381 fcb = gfar_add_fcb(skb);
1376 if (likely(fcb != NULL)) { 1382 lstatus |= BD_LFLAG(TXBD_TOE);
1377 lstatus |= BD_LFLAG(TXBD_TOE); 1383 gfar_tx_checksum(skb, fcb);
1378 gfar_tx_checksum(skb, fcb);
1379 }
1380 } 1384 }
1381 1385
1382 if (priv->vlgrp && vlan_tx_tag_present(skb)) { 1386 if (priv->vlgrp && vlan_tx_tag_present(skb)) {
1383 if (unlikely(NULL == fcb)) 1387 if (unlikely(NULL == fcb)) {
1384 fcb = gfar_add_fcb(&skb); 1388 fcb = gfar_add_fcb(skb);
1385 if (likely(fcb != NULL)) {
1386 lstatus |= BD_LFLAG(TXBD_TOE); 1389 lstatus |= BD_LFLAG(TXBD_TOE);
1387 gfar_tx_vlan(skb, fcb);
1388 } 1390 }
1391
1392 gfar_tx_vlan(skb, fcb);
1389 } 1393 }
1390 1394
1391 /* setup the TxBD length and buffer pointer for the first BD */ 1395 /* setup the TxBD length and buffer pointer for the first BD */
@@ -1433,7 +1437,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1433 /* Unlock priv */ 1437 /* Unlock priv */
1434 spin_unlock_irqrestore(&priv->txlock, flags); 1438 spin_unlock_irqrestore(&priv->txlock, flags);
1435 1439
1436 return 0; 1440 return NETDEV_TX_OK;
1437} 1441}
1438 1442
1439/* Stops the kernel queue, and halts the controller */ 1443/* Stops the kernel queue, and halts the controller */
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 5b5bf9f9861a..c25bc0bc0b25 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -905,6 +905,17 @@ static char *ibmlana_adapter_names[] __devinitdata = {
905 NULL 905 NULL
906}; 906};
907 907
908
909static const struct net_device_ops ibmlana_netdev_ops = {
910 .ndo_open = ibmlana_open,
911 .ndo_stop = ibmlana_close,
912 .ndo_start_xmit = ibmlana_tx,
913 .ndo_set_multicast_list = ibmlana_set_multicast_list,
914 .ndo_change_mtu = eth_change_mtu,
915 .ndo_set_mac_address = eth_mac_addr,
916 .ndo_validate_addr = eth_validate_addr,
917};
918
908static int __devinit ibmlana_init_one(struct device *kdev) 919static int __devinit ibmlana_init_one(struct device *kdev)
909{ 920{
910 struct mca_device *mdev = to_mca_device(kdev); 921 struct mca_device *mdev = to_mca_device(kdev);
@@ -973,11 +984,7 @@ static int __devinit ibmlana_init_one(struct device *kdev)
973 mca_device_set_claim(mdev, 1); 984 mca_device_set_claim(mdev, 1);
974 985
975 /* set methods */ 986 /* set methods */
976 987 dev->netdev_ops = &ibmlana_netdev_ops;
977 dev->open = ibmlana_open;
978 dev->stop = ibmlana_close;
979 dev->hard_start_xmit = ibmlana_tx;
980 dev->set_multicast_list = ibmlana_set_multicast_list;
981 dev->flags |= IFF_MULTICAST; 988 dev->flags |= IFF_MULTICAST;
982 989
983 /* copy out MAC address */ 990 /* copy out MAC address */
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 6f3e7f71658d..6b6548b9fda0 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1524,6 +1524,13 @@ toshoboe_close (struct pci_dev *pci_dev)
1524 free_netdev(self->netdev); 1524 free_netdev(self->netdev);
1525} 1525}
1526 1526
1527static const struct net_device_ops toshoboe_netdev_ops = {
1528 .ndo_open = toshoboe_net_open,
1529 .ndo_stop = toshoboe_net_close,
1530 .ndo_start_xmit = toshoboe_hard_xmit,
1531 .ndo_do_ioctl = toshoboe_net_ioctl,
1532};
1533
1527static int 1534static int
1528toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid) 1535toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1529{ 1536{
@@ -1657,10 +1664,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1657#endif 1664#endif
1658 1665
1659 SET_NETDEV_DEV(dev, &pci_dev->dev); 1666 SET_NETDEV_DEV(dev, &pci_dev->dev);
1660 dev->hard_start_xmit = toshoboe_hard_xmit; 1667 dev->netdev_ops = &toshoboe_netdev_ops;
1661 dev->open = toshoboe_net_open;
1662 dev->stop = toshoboe_net_close;
1663 dev->do_ioctl = toshoboe_net_ioctl;
1664 1668
1665 err = register_netdev(dev); 1669 err = register_netdev(dev);
1666 if (err) 1670 if (err)
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index d83d4010656d..633808d447be 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -454,6 +454,18 @@ out:
454} 454}
455#endif 455#endif
456 456
457static const struct net_device_ops lance_netdev_ops = {
458 .ndo_open = lance_open,
459 .ndo_start_xmit = lance_start_xmit,
460 .ndo_stop = lance_close,
461 .ndo_get_stats = lance_get_stats,
462 .ndo_set_multicast_list = set_multicast_list,
463 .ndo_tx_timeout = lance_tx_timeout,
464 .ndo_change_mtu = eth_change_mtu,
465 .ndo_set_mac_address = eth_mac_addr,
466 .ndo_validate_addr = eth_validate_addr,
467};
468
457static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options) 469static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
458{ 470{
459 struct lance_private *lp; 471 struct lance_private *lp;
@@ -714,12 +726,7 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
714 printk(version); 726 printk(version);
715 727
716 /* The LANCE-specific entries in the device structure. */ 728 /* The LANCE-specific entries in the device structure. */
717 dev->open = lance_open; 729 dev->netdev_ops = &lance_netdev_ops;
718 dev->hard_start_xmit = lance_start_xmit;
719 dev->stop = lance_close;
720 dev->get_stats = lance_get_stats;
721 dev->set_multicast_list = set_multicast_list;
722 dev->tx_timeout = lance_tx_timeout;
723 dev->watchdog_timeo = TX_TIMEOUT; 730 dev->watchdog_timeo = TX_TIMEOUT;
724 731
725 err = register_netdev(dev); 732 err = register_netdev(dev);
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 4d1a059921c6..d44bddbee373 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -952,6 +952,17 @@ static void print_eth(char *add)
952 (unsigned char) add[12], (unsigned char) add[13]); 952 (unsigned char) add[12], (unsigned char) add[13]);
953} 953}
954 954
955static const struct net_device_ops i596_netdev_ops = {
956 .ndo_open = i596_open,
957 .ndo_stop = i596_close,
958 .ndo_start_xmit = i596_start_xmit,
959 .ndo_set_multicast_list = set_multicast_list,
960 .ndo_tx_timeout = i596_tx_timeout,
961 .ndo_change_mtu = eth_change_mtu,
962 .ndo_set_mac_address = eth_mac_addr,
963 .ndo_validate_addr = eth_validate_addr,
964};
965
955static int __init lp486e_probe(struct net_device *dev) { 966static int __init lp486e_probe(struct net_device *dev) {
956 struct i596_private *lp; 967 struct i596_private *lp;
957 unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 }; 968 unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 };
@@ -1014,12 +1025,8 @@ static int __init lp486e_probe(struct net_device *dev) {
1014 printk("\n"); 1025 printk("\n");
1015 1026
1016 /* The LP486E-specific entries in the device structure. */ 1027 /* The LP486E-specific entries in the device structure. */
1017 dev->open = &i596_open; 1028 dev->netdev_ops = &i596_netdev_ops;
1018 dev->stop = &i596_close;
1019 dev->hard_start_xmit = &i596_start_xmit;
1020 dev->set_multicast_list = &set_multicast_list;
1021 dev->watchdog_timeo = 5*HZ; 1029 dev->watchdog_timeo = 5*HZ;
1022 dev->tx_timeout = i596_tx_timeout;
1023 1030
1024#if 0 1031#if 0
1025 /* selftest reports 0x320925ae - don't know what that means */ 1032 /* selftest reports 0x320925ae - don't know what that means */
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index a8bcc00c3302..77d44a061703 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -441,6 +441,18 @@ out:
441 return ERR_PTR(err); 441 return ERR_PTR(err);
442} 442}
443 443
444static const struct net_device_ops ni52_netdev_ops = {
445 .ndo_open = ni52_open,
446 .ndo_stop = ni52_close,
447 .ndo_get_stats = ni52_get_stats,
448 .ndo_tx_timeout = ni52_timeout,
449 .ndo_start_xmit = ni52_send_packet,
450 .ndo_set_multicast_list = set_multicast_list,
451 .ndo_change_mtu = eth_change_mtu,
452 .ndo_set_mac_address = eth_mac_addr,
453 .ndo_validate_addr = eth_validate_addr,
454};
455
444static int __init ni52_probe1(struct net_device *dev, int ioaddr) 456static int __init ni52_probe1(struct net_device *dev, int ioaddr)
445{ 457{
446 int i, size, retval; 458 int i, size, retval;
@@ -561,15 +573,8 @@ static int __init ni52_probe1(struct net_device *dev, int ioaddr)
561 printk("IRQ %d (assigned and not checked!).\n", dev->irq); 573 printk("IRQ %d (assigned and not checked!).\n", dev->irq);
562 } 574 }
563 575
564 dev->open = ni52_open; 576 dev->netdev_ops = &ni52_netdev_ops;
565 dev->stop = ni52_close;
566 dev->get_stats = ni52_get_stats;
567 dev->tx_timeout = ni52_timeout;
568 dev->watchdog_timeo = HZ/20; 577 dev->watchdog_timeo = HZ/20;
569 dev->hard_start_xmit = ni52_send_packet;
570 dev->set_multicast_list = set_multicast_list;
571
572 dev->if_port = 0;
573 578
574 return 0; 579 return 0;
575out: 580out:
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index df5f869e8d8f..6474f02bf783 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -237,7 +237,7 @@ struct priv
237 void *tmdbounce[TMDNUM]; 237 void *tmdbounce[TMDNUM];
238 int tmdbouncenum; 238 int tmdbouncenum;
239 int lock,xmit_queued; 239 int lock,xmit_queued;
240 struct net_device_stats stats; 240
241 void *self; 241 void *self;
242 int cmdr_addr; 242 int cmdr_addr;
243 int cardno; 243 int cardno;
@@ -257,7 +257,6 @@ static void ni65_timeout(struct net_device *dev);
257static int ni65_close(struct net_device *dev); 257static int ni65_close(struct net_device *dev);
258static int ni65_alloc_buffer(struct net_device *dev); 258static int ni65_alloc_buffer(struct net_device *dev);
259static void ni65_free_buffer(struct priv *p); 259static void ni65_free_buffer(struct priv *p);
260static struct net_device_stats *ni65_get_stats(struct net_device *);
261static void set_multicast_list(struct net_device *dev); 260static void set_multicast_list(struct net_device *dev);
262 261
263static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */ 262static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
@@ -401,6 +400,17 @@ out:
401 return ERR_PTR(err); 400 return ERR_PTR(err);
402} 401}
403 402
403static const struct net_device_ops ni65_netdev_ops = {
404 .ndo_open = ni65_open,
405 .ndo_stop = ni65_close,
406 .ndo_start_xmit = ni65_send_packet,
407 .ndo_tx_timeout = ni65_timeout,
408 .ndo_set_multicast_list = set_multicast_list,
409 .ndo_change_mtu = eth_change_mtu,
410 .ndo_set_mac_address = eth_mac_addr,
411 .ndo_validate_addr = eth_validate_addr,
412};
413
404/* 414/*
405 * this is the real card probe .. 415 * this is the real card probe ..
406 */ 416 */
@@ -549,13 +559,9 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
549 } 559 }
550 560
551 dev->base_addr = ioaddr; 561 dev->base_addr = ioaddr;
552 dev->open = ni65_open; 562 dev->netdev_ops = &ni65_netdev_ops;
553 dev->stop = ni65_close;
554 dev->hard_start_xmit = ni65_send_packet;
555 dev->tx_timeout = ni65_timeout;
556 dev->watchdog_timeo = HZ/2; 563 dev->watchdog_timeo = HZ/2;
557 dev->get_stats = ni65_get_stats; 564
558 dev->set_multicast_list = set_multicast_list;
559 return 0; /* everything is OK */ 565 return 0; /* everything is OK */
560} 566}
561 567
@@ -901,13 +907,13 @@ static irqreturn_t ni65_interrupt(int irq, void * dev_id)
901 if(debuglevel > 1) 907 if(debuglevel > 1)
902 printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0); 908 printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
903 if(csr0 & CSR0_BABL) 909 if(csr0 & CSR0_BABL)
904 p->stats.tx_errors++; 910 dev->stats.tx_errors++;
905 if(csr0 & CSR0_MISS) { 911 if(csr0 & CSR0_MISS) {
906 int i; 912 int i;
907 for(i=0;i<RMDNUM;i++) 913 for(i=0;i<RMDNUM;i++)
908 printk("%02x ",p->rmdhead[i].u.s.status); 914 printk("%02x ",p->rmdhead[i].u.s.status);
909 printk("\n"); 915 printk("\n");
910 p->stats.rx_errors++; 916 dev->stats.rx_errors++;
911 } 917 }
912 if(csr0 & CSR0_MERR) { 918 if(csr0 & CSR0_MERR) {
913 if(debuglevel > 1) 919 if(debuglevel > 1)
@@ -997,12 +1003,12 @@ static void ni65_xmit_intr(struct net_device *dev,int csr0)
997#endif 1003#endif
998 /* checking some errors */ 1004 /* checking some errors */
999 if(tmdp->status2 & XMIT_RTRY) 1005 if(tmdp->status2 & XMIT_RTRY)
1000 p->stats.tx_aborted_errors++; 1006 dev->stats.tx_aborted_errors++;
1001 if(tmdp->status2 & XMIT_LCAR) 1007 if(tmdp->status2 & XMIT_LCAR)
1002 p->stats.tx_carrier_errors++; 1008 dev->stats.tx_carrier_errors++;
1003 if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) { 1009 if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
1004 /* this stops the xmitter */ 1010 /* this stops the xmitter */
1005 p->stats.tx_fifo_errors++; 1011 dev->stats.tx_fifo_errors++;
1006 if(debuglevel > 0) 1012 if(debuglevel > 0)
1007 printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name); 1013 printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
1008 if(p->features & INIT_RING_BEFORE_START) { 1014 if(p->features & INIT_RING_BEFORE_START) {
@@ -1016,12 +1022,12 @@ static void ni65_xmit_intr(struct net_device *dev,int csr0)
1016 if(debuglevel > 2) 1022 if(debuglevel > 2)
1017 printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2); 1023 printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
1018 if(!(csr0 & CSR0_BABL)) /* don't count errors twice */ 1024 if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
1019 p->stats.tx_errors++; 1025 dev->stats.tx_errors++;
1020 tmdp->status2 = 0; 1026 tmdp->status2 = 0;
1021 } 1027 }
1022 else { 1028 else {
1023 p->stats.tx_bytes -= (short)(tmdp->blen); 1029 dev->stats.tx_bytes -= (short)(tmdp->blen);
1024 p->stats.tx_packets++; 1030 dev->stats.tx_packets++;
1025 } 1031 }
1026 1032
1027#ifdef XMT_VIA_SKB 1033#ifdef XMT_VIA_SKB
@@ -1057,7 +1063,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
1057 if(!(rmdstat & RCV_ERR)) { 1063 if(!(rmdstat & RCV_ERR)) {
1058 if(rmdstat & RCV_START) 1064 if(rmdstat & RCV_START)
1059 { 1065 {
1060 p->stats.rx_length_errors++; 1066 dev->stats.rx_length_errors++;
1061 printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff); 1067 printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
1062 } 1068 }
1063 } 1069 }
@@ -1066,16 +1072,16 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
1066 printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n", 1072 printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
1067 dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) ); 1073 dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
1068 if(rmdstat & RCV_FRAM) 1074 if(rmdstat & RCV_FRAM)
1069 p->stats.rx_frame_errors++; 1075 dev->stats.rx_frame_errors++;
1070 if(rmdstat & RCV_OFLO) 1076 if(rmdstat & RCV_OFLO)
1071 p->stats.rx_over_errors++; 1077 dev->stats.rx_over_errors++;
1072 if(rmdstat & RCV_CRC) 1078 if(rmdstat & RCV_CRC)
1073 p->stats.rx_crc_errors++; 1079 dev->stats.rx_crc_errors++;
1074 if(rmdstat & RCV_BUF_ERR) 1080 if(rmdstat & RCV_BUF_ERR)
1075 p->stats.rx_fifo_errors++; 1081 dev->stats.rx_fifo_errors++;
1076 } 1082 }
1077 if(!(csr0 & CSR0_MISS)) /* don't count errors twice */ 1083 if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
1078 p->stats.rx_errors++; 1084 dev->stats.rx_errors++;
1079 } 1085 }
1080 else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60) 1086 else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
1081 { 1087 {
@@ -1106,20 +1112,20 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
1106 skb_put(skb,len); 1112 skb_put(skb,len);
1107 skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len); 1113 skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
1108#endif 1114#endif
1109 p->stats.rx_packets++; 1115 dev->stats.rx_packets++;
1110 p->stats.rx_bytes += len; 1116 dev->stats.rx_bytes += len;
1111 skb->protocol=eth_type_trans(skb,dev); 1117 skb->protocol=eth_type_trans(skb,dev);
1112 netif_rx(skb); 1118 netif_rx(skb);
1113 } 1119 }
1114 else 1120 else
1115 { 1121 {
1116 printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name); 1122 printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
1117 p->stats.rx_dropped++; 1123 dev->stats.rx_dropped++;
1118 } 1124 }
1119 } 1125 }
1120 else { 1126 else {
1121 printk(KERN_INFO "%s: received runt packet\n",dev->name); 1127 printk(KERN_INFO "%s: received runt packet\n",dev->name);
1122 p->stats.rx_errors++; 1128 dev->stats.rx_errors++;
1123 } 1129 }
1124 rmdp->blen = -(R_BUF_SIZE-8); 1130 rmdp->blen = -(R_BUF_SIZE-8);
1125 rmdp->mlen = 0; 1131 rmdp->mlen = 0;
@@ -1213,23 +1219,6 @@ static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
1213 return 0; 1219 return 0;
1214} 1220}
1215 1221
1216static struct net_device_stats *ni65_get_stats(struct net_device *dev)
1217{
1218
1219#if 0
1220 int i;
1221 struct priv *p = dev->ml_priv;
1222 for(i=0;i<RMDNUM;i++)
1223 {
1224 struct rmd *rmdp = p->rmdhead + ((p->rmdnum + i) & (RMDNUM-1));
1225 printk("%02x ",rmdp->u.s.status);
1226 }
1227 printk("\n");
1228#endif
1229
1230 return &((struct priv *)dev->ml_priv)->stats;
1231}
1232
1233static void set_multicast_list(struct net_device *dev) 1222static void set_multicast_list(struct net_device *dev)
1234{ 1223{
1235 if(!ni65_lance_reinit(dev)) 1224 if(!ni65_lance_reinit(dev))
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index 12a8ffffeb03..ebbbe09725fe 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -143,6 +143,17 @@ out:
143 return ERR_PTR(err); 143 return ERR_PTR(err);
144} 144}
145 145
146static const struct net_device_ops seeq8005_netdev_ops = {
147 .ndo_open = seeq8005_open,
148 .ndo_stop = seeq8005_close,
149 .ndo_start_xmit = seeq8005_send_packet,
150 .ndo_tx_timeout = seeq8005_timeout,
151 .ndo_set_multicast_list = set_multicast_list,
152 .ndo_change_mtu = eth_change_mtu,
153 .ndo_set_mac_address = eth_mac_addr,
154 .ndo_validate_addr = eth_validate_addr,
155};
156
146/* This is the real probe routine. Linux has a history of friendly device 157/* This is the real probe routine. Linux has a history of friendly device
147 probes on the ISA bus. A good device probes avoids doing writes, and 158 probes on the ISA bus. A good device probes avoids doing writes, and
148 verifies that the correct device exists and functions. */ 159 verifies that the correct device exists and functions. */
@@ -332,12 +343,8 @@ static int __init seeq8005_probe1(struct net_device *dev, int ioaddr)
332 } 343 }
333 } 344 }
334#endif 345#endif
335 dev->open = seeq8005_open; 346 dev->netdev_ops = &seeq8005_netdev_ops;
336 dev->stop = seeq8005_close;
337 dev->hard_start_xmit = seeq8005_send_packet;
338 dev->tx_timeout = seeq8005_timeout;
339 dev->watchdog_timeo = HZ/20; 347 dev->watchdog_timeo = HZ/20;
340 dev->set_multicast_list = set_multicast_list;
341 dev->flags &= ~IFF_MULTICAST; 348 dev->flags &= ~IFF_MULTICAST;
342 349
343 return 0; 350 return 0;
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index 2033fee3143a..0291ea098a06 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -142,9 +142,6 @@ static int __init do_ultra_probe(struct net_device *dev)
142 int base_addr = dev->base_addr; 142 int base_addr = dev->base_addr;
143 int irq = dev->irq; 143 int irq = dev->irq;
144 144
145#ifdef CONFIG_NET_POLL_CONTROLLER
146 dev->poll_controller = &ultra_poll;
147#endif
148 if (base_addr > 0x1ff) /* Check a single specified location. */ 145 if (base_addr > 0x1ff) /* Check a single specified location. */
149 return ultra_probe1(dev, base_addr); 146 return ultra_probe1(dev, base_addr);
150 else if (base_addr != 0) /* Don't probe at all. */ 147 else if (base_addr != 0) /* Don't probe at all. */
@@ -199,7 +196,7 @@ static const struct net_device_ops ultra_netdev_ops = {
199 .ndo_set_mac_address = eth_mac_addr, 196 .ndo_set_mac_address = eth_mac_addr,
200 .ndo_change_mtu = eth_change_mtu, 197 .ndo_change_mtu = eth_change_mtu,
201#ifdef CONFIG_NET_POLL_CONTROLLER 198#ifdef CONFIG_NET_POLL_CONTROLLER
202 .ndo_poll_controller = ei_poll, 199 .ndo_poll_controller = ultra_poll,
203#endif 200#endif
204}; 201};
205 202
diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c
index cb6c097a2e0a..7a554adc70fb 100644
--- a/drivers/net/smc-ultra32.c
+++ b/drivers/net/smc-ultra32.c
@@ -153,6 +153,22 @@ out:
153 return ERR_PTR(err); 153 return ERR_PTR(err);
154} 154}
155 155
156
157static const struct net_device_ops ultra32_netdev_ops = {
158 .ndo_open = ultra32_open,
159 .ndo_stop = ultra32_close,
160 .ndo_start_xmit = ei_start_xmit,
161 .ndo_tx_timeout = ei_tx_timeout,
162 .ndo_get_stats = ei_get_stats,
163 .ndo_set_multicast_list = ei_set_multicast_list,
164 .ndo_validate_addr = eth_validate_addr,
165 .ndo_set_mac_address = eth_mac_addr,
166 .ndo_change_mtu = eth_change_mtu,
167#ifdef CONFIG_NET_POLL_CONTROLLER
168 .ndo_poll_controller = ei_poll,
169#endif
170};
171
156static int __init ultra32_probe1(struct net_device *dev, int ioaddr) 172static int __init ultra32_probe1(struct net_device *dev, int ioaddr)
157{ 173{
158 int i, edge, media, retval; 174 int i, edge, media, retval;
@@ -273,11 +289,8 @@ static int __init ultra32_probe1(struct net_device *dev, int ioaddr)
273 ei_status.block_output = &ultra32_block_output; 289 ei_status.block_output = &ultra32_block_output;
274 ei_status.get_8390_hdr = &ultra32_get_8390_hdr; 290 ei_status.get_8390_hdr = &ultra32_get_8390_hdr;
275 ei_status.reset_8390 = &ultra32_reset_8390; 291 ei_status.reset_8390 = &ultra32_reset_8390;
276 dev->open = &ultra32_open; 292
277 dev->stop = &ultra32_close; 293 dev->netdev_ops = &ultra32_netdev_ops;
278#ifdef CONFIG_NET_POLL_CONTROLLER
279 dev->poll_controller = ei_poll;
280#endif
281 NS8390_init(dev, 0); 294 NS8390_init(dev, 0);
282 295
283 return 0; 296 return 0;
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 18d653bbd4e0..9a7973a54116 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -831,6 +831,17 @@ static int __init smc_findirq(int ioaddr)
831#endif 831#endif
832} 832}
833 833
834static const struct net_device_ops smc_netdev_ops = {
835 .ndo_open = smc_open,
836 .ndo_stop = smc_close,
837 .ndo_start_xmit = smc_wait_to_send_packet,
838 .ndo_tx_timeout = smc_timeout,
839 .ndo_set_multicast_list = smc_set_multicast_list,
840 .ndo_change_mtu = eth_change_mtu,
841 .ndo_set_mac_address = eth_mac_addr,
842 .ndo_validate_addr = eth_validate_addr,
843};
844
834/*---------------------------------------------------------------------- 845/*----------------------------------------------------------------------
835 . Function: smc_probe( int ioaddr ) 846 . Function: smc_probe( int ioaddr )
836 . 847 .
@@ -1044,12 +1055,8 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
1044 goto err_out; 1055 goto err_out;
1045 } 1056 }
1046 1057
1047 dev->open = smc_open; 1058 dev->netdev_ops = &smc_netdev_ops;
1048 dev->stop = smc_close;
1049 dev->hard_start_xmit = smc_wait_to_send_packet;
1050 dev->tx_timeout = smc_timeout;
1051 dev->watchdog_timeo = HZ/20; 1059 dev->watchdog_timeo = HZ/20;
1052 dev->set_multicast_list = smc_set_multicast_list;
1053 1060
1054 return 0; 1061 return 0;
1055 1062
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index ad3cbc91a8fa..af8f60ca0f57 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1680,6 +1680,7 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
1680 u8 address, u8 data) 1680 u8 address, u8 data)
1681{ 1681{
1682 u32 op = E2P_CMD_EPC_CMD_ERASE_ | address; 1682 u32 op = E2P_CMD_EPC_CMD_ERASE_ | address;
1683 u32 temp;
1683 int ret; 1684 int ret;
1684 1685
1685 SMSC_TRACE(DRV, "address 0x%x, data 0x%x", address, data); 1686 SMSC_TRACE(DRV, "address 0x%x, data 0x%x", address, data);
@@ -1688,6 +1689,10 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
1688 if (!ret) { 1689 if (!ret) {
1689 op = E2P_CMD_EPC_CMD_WRITE_ | address; 1690 op = E2P_CMD_EPC_CMD_WRITE_ | address;
1690 smsc911x_reg_write(pdata, E2P_DATA, (u32)data); 1691 smsc911x_reg_write(pdata, E2P_DATA, (u32)data);
1692
1693 /* Workaround for hardware read-after-write restriction */
1694 temp = smsc911x_reg_read(pdata, BYTE_TEST);
1695
1691 ret = smsc911x_eeprom_send_cmd(pdata, op); 1696 ret = smsc911x_eeprom_send_cmd(pdata, op);
1692 } 1697 }
1693 1698
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 193308118f95..456f8bff40be 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -142,7 +142,7 @@ static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsign
142 return; 142 return;
143} 143}
144 144
145 145static struct net_device_ops madgemc_netdev_ops __read_mostly;
146 146
147static int __devinit madgemc_probe(struct device *device) 147static int __devinit madgemc_probe(struct device *device)
148{ 148{
@@ -168,7 +168,7 @@ static int __devinit madgemc_probe(struct device *device)
168 goto getout; 168 goto getout;
169 } 169 }
170 170
171 dev->dma = 0; 171 dev->netdev_ops = &madgemc_netdev_ops;
172 172
173 card = kmalloc(sizeof(struct card_info), GFP_KERNEL); 173 card = kmalloc(sizeof(struct card_info), GFP_KERNEL);
174 if (card==NULL) { 174 if (card==NULL) {
@@ -348,9 +348,6 @@ static int __devinit madgemc_probe(struct device *device)
348 348
349 memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1); 349 memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1);
350 350
351 dev->open = madgemc_open;
352 dev->stop = madgemc_close;
353
354 tp->tmspriv = card; 351 tp->tmspriv = card;
355 dev_set_drvdata(device, dev); 352 dev_set_drvdata(device, dev);
356 353
@@ -758,6 +755,10 @@ static struct mca_driver madgemc_driver = {
758 755
759static int __init madgemc_init (void) 756static int __init madgemc_init (void)
760{ 757{
758 madgemc_netdev_ops = tms380tr_netdev_ops;
759 madgemc_netdev_ops.ndo_open = madgemc_open;
760 madgemc_netdev_ops.ndo_stop = madgemc_close;
761
761 return mca_register_driver (&madgemc_driver); 762 return mca_register_driver (&madgemc_driver);
762} 763}
763 764
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
index b8c955f6d31a..16e8783ee9cd 100644
--- a/drivers/net/tokenring/proteon.c
+++ b/drivers/net/tokenring/proteon.c
@@ -116,6 +116,8 @@ nodev:
116 return -ENODEV; 116 return -ENODEV;
117} 117}
118 118
119static struct net_device_ops proteon_netdev_ops __read_mostly;
120
119static int __init setup_card(struct net_device *dev, struct device *pdev) 121static int __init setup_card(struct net_device *dev, struct device *pdev)
120{ 122{
121 struct net_local *tp; 123 struct net_local *tp;
@@ -167,8 +169,7 @@ static int __init setup_card(struct net_device *dev, struct device *pdev)
167 169
168 tp->tmspriv = NULL; 170 tp->tmspriv = NULL;
169 171
170 dev->open = proteon_open; 172 dev->netdev_ops = &proteon_netdev_ops;
171 dev->stop = tms380tr_close;
172 173
173 if (dev->irq == 0) 174 if (dev->irq == 0)
174 { 175 {
@@ -352,6 +353,10 @@ static int __init proteon_init(void)
352 struct platform_device *pdev; 353 struct platform_device *pdev;
353 int i, num = 0, err = 0; 354 int i, num = 0, err = 0;
354 355
356 proteon_netdev_ops = tms380tr_netdev_ops;
357 proteon_netdev_ops.ndo_open = proteon_open;
358 proteon_netdev_ops.ndo_stop = tms380tr_close;
359
355 err = platform_driver_register(&proteon_driver); 360 err = platform_driver_register(&proteon_driver);
356 if (err) 361 if (err)
357 return err; 362 return err;
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
index c0f58f08782c..46db5c5395b2 100644
--- a/drivers/net/tokenring/skisa.c
+++ b/drivers/net/tokenring/skisa.c
@@ -133,6 +133,8 @@ static int __init sk_isa_probe1(struct net_device *dev, int ioaddr)
133 return 0; 133 return 0;
134} 134}
135 135
136static struct net_device_ops sk_isa_netdev_ops __read_mostly;
137
136static int __init setup_card(struct net_device *dev, struct device *pdev) 138static int __init setup_card(struct net_device *dev, struct device *pdev)
137{ 139{
138 struct net_local *tp; 140 struct net_local *tp;
@@ -184,8 +186,7 @@ static int __init setup_card(struct net_device *dev, struct device *pdev)
184 186
185 tp->tmspriv = NULL; 187 tp->tmspriv = NULL;
186 188
187 dev->open = sk_isa_open; 189 dev->netdev_ops = &sk_isa_netdev_ops;
188 dev->stop = tms380tr_close;
189 190
190 if (dev->irq == 0) 191 if (dev->irq == 0)
191 { 192 {
@@ -362,6 +363,10 @@ static int __init sk_isa_init(void)
362 struct platform_device *pdev; 363 struct platform_device *pdev;
363 int i, num = 0, err = 0; 364 int i, num = 0, err = 0;
364 365
366 sk_isa_netdev_ops = tms380tr_netdev_ops;
367 sk_isa_netdev_ops.ndo_open = sk_isa_open;
368 sk_isa_netdev_ops.ndo_stop = tms380tr_close;
369
365 err = platform_driver_register(&sk_isa_driver); 370 err = platform_driver_register(&sk_isa_driver);
366 if (err) 371 if (err)
367 return err; 372 return err;
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index 9d7db2c8d661..a91d9c55d78e 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -124,7 +124,6 @@ static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev);
124static int smctr_get_physical_drop_number(struct net_device *dev); 124static int smctr_get_physical_drop_number(struct net_device *dev);
125static __u8 *smctr_get_rx_pointer(struct net_device *dev, short queue); 125static __u8 *smctr_get_rx_pointer(struct net_device *dev, short queue);
126static int smctr_get_station_id(struct net_device *dev); 126static int smctr_get_station_id(struct net_device *dev);
127static struct net_device_stats *smctr_get_stats(struct net_device *dev);
128static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue, 127static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
129 __u16 bytes_count); 128 __u16 bytes_count);
130static int smctr_get_upstream_neighbor_addr(struct net_device *dev); 129static int smctr_get_upstream_neighbor_addr(struct net_device *dev);
@@ -3633,6 +3632,14 @@ out:
3633 return ERR_PTR(err); 3632 return ERR_PTR(err);
3634} 3633}
3635 3634
3635static const struct net_device_ops smctr_netdev_ops = {
3636 .ndo_open = smctr_open,
3637 .ndo_stop = smctr_close,
3638 .ndo_start_xmit = smctr_send_packet,
3639 .ndo_tx_timeout = smctr_timeout,
3640 .ndo_get_stats = smctr_get_stats,
3641 .ndo_set_multicast_list = smctr_set_multicast_list,
3642};
3636 3643
3637static int __init smctr_probe1(struct net_device *dev, int ioaddr) 3644static int __init smctr_probe1(struct net_device *dev, int ioaddr)
3638{ 3645{
@@ -3683,13 +3690,8 @@ static int __init smctr_probe1(struct net_device *dev, int ioaddr)
3683 (unsigned int)dev->base_addr, 3690 (unsigned int)dev->base_addr,
3684 dev->irq, tp->rom_base, tp->ram_base); 3691 dev->irq, tp->rom_base, tp->ram_base);
3685 3692
3686 dev->open = smctr_open; 3693 dev->netdev_ops = &smctr_netdev_ops;
3687 dev->stop = smctr_close;
3688 dev->hard_start_xmit = smctr_send_packet;
3689 dev->tx_timeout = smctr_timeout;
3690 dev->watchdog_timeo = HZ; 3694 dev->watchdog_timeo = HZ;
3691 dev->get_stats = smctr_get_stats;
3692 dev->set_multicast_list = &smctr_set_multicast_list;
3693 return (0); 3695 return (0);
3694 3696
3695out: 3697out:
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index a110326dce6f..86a479f61c0c 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2009,6 +2009,9 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2009 /* Disable Rx and Tx */ 2009 /* Disable Rx and Tx */
2010 clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); 2010 clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2011 2011
2012 phy_disconnect(ugeth->phydev);
2013 ugeth->phydev = NULL;
2014
2012 ucc_geth_memclean(ugeth); 2015 ucc_geth_memclean(ugeth);
2013} 2016}
2014 2017
@@ -3345,6 +3348,14 @@ static int ucc_geth_open(struct net_device *dev)
3345 return -EINVAL; 3348 return -EINVAL;
3346 } 3349 }
3347 3350
3351 err = init_phy(dev);
3352 if (err) {
3353 if (netif_msg_ifup(ugeth))
3354 ugeth_err("%s: Cannot initialize PHY, aborting.",
3355 dev->name);
3356 return err;
3357 }
3358
3348 err = ucc_struct_init(ugeth); 3359 err = ucc_struct_init(ugeth);
3349 if (err) { 3360 if (err) {
3350 if (netif_msg_ifup(ugeth)) 3361 if (netif_msg_ifup(ugeth))
@@ -3381,13 +3392,6 @@ static int ucc_geth_open(struct net_device *dev)
3381 &ugeth->ug_regs->macstnaddr1, 3392 &ugeth->ug_regs->macstnaddr1,
3382 &ugeth->ug_regs->macstnaddr2); 3393 &ugeth->ug_regs->macstnaddr2);
3383 3394
3384 err = init_phy(dev);
3385 if (err) {
3386 if (netif_msg_ifup(ugeth))
3387 ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
3388 goto out_err;
3389 }
3390
3391 phy_start(ugeth->phydev); 3395 phy_start(ugeth->phydev);
3392 3396
3393 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); 3397 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
@@ -3430,9 +3434,6 @@ static int ucc_geth_close(struct net_device *dev)
3430 3434
3431 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev); 3435 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
3432 3436
3433 phy_disconnect(ugeth->phydev);
3434 ugeth->phydev = NULL;
3435
3436 netif_stop_queue(dev); 3437 netif_stop_queue(dev);
3437 3438
3438 return 0; 3439 return 0;
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 6a07ba9371db..1d637f407a0c 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -714,19 +714,19 @@ static int sdla_transmit(struct sk_buff *skb, struct net_device *dev)
714 switch (ret) 714 switch (ret)
715 { 715 {
716 case SDLA_RET_OK: 716 case SDLA_RET_OK:
717 flp->stats.tx_packets++; 717 dev->stats.tx_packets++;
718 ret = DLCI_RET_OK; 718 ret = DLCI_RET_OK;
719 break; 719 break;
720 720
721 case SDLA_RET_CIR_OVERFLOW: 721 case SDLA_RET_CIR_OVERFLOW:
722 case SDLA_RET_BUF_OVERSIZE: 722 case SDLA_RET_BUF_OVERSIZE:
723 case SDLA_RET_NO_BUFS: 723 case SDLA_RET_NO_BUFS:
724 flp->stats.tx_dropped++; 724 dev->stats.tx_dropped++;
725 ret = DLCI_RET_DROP; 725 ret = DLCI_RET_DROP;
726 break; 726 break;
727 727
728 default: 728 default:
729 flp->stats.tx_errors++; 729 dev->stats.tx_errors++;
730 ret = DLCI_RET_ERR; 730 ret = DLCI_RET_ERR;
731 break; 731 break;
732 } 732 }
@@ -807,7 +807,7 @@ static void sdla_receive(struct net_device *dev)
807 if (i == CONFIG_DLCI_MAX) 807 if (i == CONFIG_DLCI_MAX)
808 { 808 {
809 printk(KERN_NOTICE "%s: Received packet from invalid DLCI %i, ignoring.", dev->name, dlci); 809 printk(KERN_NOTICE "%s: Received packet from invalid DLCI %i, ignoring.", dev->name, dlci);
810 flp->stats.rx_errors++; 810 dev->stats.rx_errors++;
811 success = 0; 811 success = 0;
812 } 812 }
813 } 813 }
@@ -819,7 +819,7 @@ static void sdla_receive(struct net_device *dev)
819 if (skb == NULL) 819 if (skb == NULL)
820 { 820 {
821 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); 821 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
822 flp->stats.rx_dropped++; 822 dev->stats.rx_dropped++;
823 success = 0; 823 success = 0;
824 } 824 }
825 else 825 else
@@ -859,7 +859,7 @@ static void sdla_receive(struct net_device *dev)
859 859
860 if (success) 860 if (success)
861 { 861 {
862 flp->stats.rx_packets++; 862 dev->stats.rx_packets++;
863 dlp = netdev_priv(master); 863 dlp = netdev_priv(master);
864 (*dlp->receive)(skb, master); 864 (*dlp->receive)(skb, master);
865 } 865 }
@@ -1590,13 +1590,14 @@ fail:
1590 return err; 1590 return err;
1591} 1591}
1592 1592
1593static struct net_device_stats *sdla_stats(struct net_device *dev) 1593static const struct net_device_ops sdla_netdev_ops = {
1594{ 1594 .ndo_open = sdla_open,
1595 struct frad_local *flp; 1595 .ndo_stop = sdla_close,
1596 flp = netdev_priv(dev); 1596 .ndo_do_ioctl = sdla_ioctl,
1597 1597 .ndo_set_config = sdla_set_config,
1598 return(&flp->stats); 1598 .ndo_start_xmit = sdla_transmit,
1599} 1599 .ndo_change_mtu = sdla_change_mtu,
1600};
1600 1601
1601static void setup_sdla(struct net_device *dev) 1602static void setup_sdla(struct net_device *dev)
1602{ 1603{
@@ -1604,20 +1605,13 @@ static void setup_sdla(struct net_device *dev)
1604 1605
1605 netdev_boot_setup_check(dev); 1606 netdev_boot_setup_check(dev);
1606 1607
1608 dev->netdev_ops = &sdla_netdev_ops;
1607 dev->flags = 0; 1609 dev->flags = 0;
1608 dev->type = 0xFFFF; 1610 dev->type = 0xFFFF;
1609 dev->hard_header_len = 0; 1611 dev->hard_header_len = 0;
1610 dev->addr_len = 0; 1612 dev->addr_len = 0;
1611 dev->mtu = SDLA_MAX_MTU; 1613 dev->mtu = SDLA_MAX_MTU;
1612 1614
1613 dev->open = sdla_open;
1614 dev->stop = sdla_close;
1615 dev->do_ioctl = sdla_ioctl;
1616 dev->set_config = sdla_set_config;
1617 dev->get_stats = sdla_stats;
1618 dev->hard_start_xmit = sdla_transmit;
1619 dev->change_mtu = sdla_change_mtu;
1620
1621 flp->activate = sdla_activate; 1615 flp->activate = sdla_activate;
1622 flp->deactivate = sdla_deactivate; 1616 flp->deactivate = sdla_deactivate;
1623 flp->assoc = sdla_assoc; 1617 flp->assoc = sdla_assoc;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 612fffe100a6..8a0823588c51 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -485,6 +485,7 @@ config MWL8K
485source "drivers/net/wireless/p54/Kconfig" 485source "drivers/net/wireless/p54/Kconfig"
486source "drivers/net/wireless/ath5k/Kconfig" 486source "drivers/net/wireless/ath5k/Kconfig"
487source "drivers/net/wireless/ath9k/Kconfig" 487source "drivers/net/wireless/ath9k/Kconfig"
488source "drivers/net/wireless/ar9170/Kconfig"
488source "drivers/net/wireless/ipw2x00/Kconfig" 489source "drivers/net/wireless/ipw2x00/Kconfig"
489source "drivers/net/wireless/iwlwifi/Kconfig" 490source "drivers/net/wireless/iwlwifi/Kconfig"
490source "drivers/net/wireless/hostap/Kconfig" 491source "drivers/net/wireless/hostap/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index d780487c420f..50e7fba7f0ea 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -57,5 +57,6 @@ obj-$(CONFIG_P54_COMMON) += p54/
57 57
58obj-$(CONFIG_ATH5K) += ath5k/ 58obj-$(CONFIG_ATH5K) += ath5k/
59obj-$(CONFIG_ATH9K) += ath9k/ 59obj-$(CONFIG_ATH9K) += ath9k/
60obj-$(CONFIG_AR9170_USB) += ar9170/
60 61
61obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o 62obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
diff --git a/drivers/net/wireless/ar9170/Kconfig b/drivers/net/wireless/ar9170/Kconfig
new file mode 100644
index 000000000000..de4281fda129
--- /dev/null
+++ b/drivers/net/wireless/ar9170/Kconfig
@@ -0,0 +1,17 @@
1config AR9170_USB
2 tristate "Atheros AR9170 802.11n USB support"
3 depends on USB && MAC80211 && WLAN_80211 && EXPERIMENTAL
4 select FW_LOADER
5 help
6 This is a driver for the Atheros "otus" 802.11n USB devices.
7
8 These devices require additional firmware (2 files).
9 For now, these files can be downloaded from here:
10 http://wireless.kernel.org/en/users/Drivers/ar9170
11
12 If you choose to build a module, it'll be called ar9170usb.
13
14config AR9170_LEDS
15 bool
16 depends on AR9170_USB && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = AR9170_USB)
17 default y
diff --git a/drivers/net/wireless/ar9170/Makefile b/drivers/net/wireless/ar9170/Makefile
new file mode 100644
index 000000000000..8d91c7ee3215
--- /dev/null
+++ b/drivers/net/wireless/ar9170/Makefile
@@ -0,0 +1,3 @@
1ar9170usb-objs := usb.o main.o cmd.o mac.o phy.o led.o
2
3obj-$(CONFIG_AR9170_USB) += ar9170usb.o
diff --git a/drivers/net/wireless/ar9170/ar9170.h b/drivers/net/wireless/ar9170/ar9170.h
new file mode 100644
index 000000000000..f4fb2e94aea0
--- /dev/null
+++ b/drivers/net/wireless/ar9170/ar9170.h
@@ -0,0 +1,209 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * Driver specific definitions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#ifndef __AR9170_H
39#define __AR9170_H
40
41#include <linux/completion.h>
42#include <linux/spinlock.h>
43#include <net/wireless.h>
44#include <net/mac80211.h>
45#ifdef CONFIG_AR9170_LEDS
46#include <linux/leds.h>
47#endif /* CONFIG_AR9170_LEDS */
48#include "eeprom.h"
49#include "hw.h"
50
51#define PAYLOAD_MAX (AR9170_MAX_CMD_LEN/4 - 1)
52
53enum ar9170_bw {
54 AR9170_BW_20,
55 AR9170_BW_40_BELOW,
56 AR9170_BW_40_ABOVE,
57
58 __AR9170_NUM_BW,
59};
60
61enum ar9170_rf_init_mode {
62 AR9170_RFI_NONE,
63 AR9170_RFI_WARM,
64 AR9170_RFI_COLD,
65};
66
67#define AR9170_MAX_RX_BUFFER_SIZE 8192
68
69#ifdef CONFIG_AR9170_LEDS
70struct ar9170;
71
72struct ar9170_led {
73 struct ar9170 *ar;
74 struct led_classdev l;
75 char name[32];
76 unsigned int toggled;
77 bool registered;
78};
79
80#endif /* CONFIG_AR9170_LEDS */
81
82enum ar9170_device_state {
83 AR9170_UNKNOWN_STATE,
84 AR9170_STOPPED,
85 AR9170_IDLE,
86 AR9170_STARTED,
87 AR9170_ASSOCIATED,
88};
89
90struct ar9170 {
91 struct ieee80211_hw *hw;
92 struct mutex mutex;
93 enum ar9170_device_state state;
94
95 int (*open)(struct ar9170 *);
96 void (*stop)(struct ar9170 *);
97 int (*tx)(struct ar9170 *, struct sk_buff *, bool, unsigned int);
98 int (*exec_cmd)(struct ar9170 *, enum ar9170_cmd, u32 ,
99 void *, u32 , void *);
100 void (*callback_cmd)(struct ar9170 *, u32 , void *);
101
102 /* interface mode settings */
103 struct ieee80211_vif *vif;
104 u8 mac_addr[ETH_ALEN];
105 u8 bssid[ETH_ALEN];
106
107 /* beaconing */
108 struct sk_buff *beacon;
109 struct work_struct beacon_work;
110
111 /* cryptographic engine */
112 u64 usedkeys;
113 bool rx_software_decryption;
114 bool disable_offload;
115
116 /* filter settings */
117 struct work_struct filter_config_work;
118 u64 cur_mc_hash, want_mc_hash;
119 u32 cur_filter, want_filter;
120 unsigned int filter_changed;
121 bool sniffer_enabled;
122
123 /* PHY */
124 struct ieee80211_channel *channel;
125 int noise[4];
126
127 /* power calibration data */
128 u8 power_5G_leg[4];
129 u8 power_2G_cck[4];
130 u8 power_2G_ofdm[4];
131 u8 power_5G_ht20[8];
132 u8 power_5G_ht40[8];
133 u8 power_2G_ht20[8];
134 u8 power_2G_ht40[8];
135
136#ifdef CONFIG_AR9170_LEDS
137 struct delayed_work led_work;
138 struct ar9170_led leds[AR9170_NUM_LEDS];
139#endif /* CONFIG_AR9170_LEDS */
140
141 /* qos queue settings */
142 spinlock_t tx_stats_lock;
143 struct ieee80211_tx_queue_stats tx_stats[5];
144 struct ieee80211_tx_queue_params edcf[5];
145
146 spinlock_t cmdlock;
147 __le32 cmdbuf[PAYLOAD_MAX + 1];
148
149 /* MAC statistics */
150 struct ieee80211_low_level_stats stats;
151
152 /* EEPROM */
153 struct ar9170_eeprom eeprom;
154
155 /* global tx status for unregistered Stations. */
156 struct sk_buff_head global_tx_status;
157 struct sk_buff_head global_tx_status_waste;
158 struct delayed_work tx_status_janitor;
159};
160
161struct ar9170_sta_info {
162 struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
163};
164
165#define IS_STARTED(a) (a->state >= AR9170_STARTED)
166#define IS_ACCEPTING_CMD(a) (a->state >= AR9170_IDLE)
167
168#define AR9170_FILTER_CHANGED_PROMISC BIT(0)
169#define AR9170_FILTER_CHANGED_MULTICAST BIT(1)
170#define AR9170_FILTER_CHANGED_FRAMEFILTER BIT(2)
171
172/* exported interface */
173void *ar9170_alloc(size_t priv_size);
174int ar9170_register(struct ar9170 *ar, struct device *pdev);
175void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb);
176void ar9170_unregister(struct ar9170 *ar);
177void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
178 bool update_statistics, u16 tx_status);
179
180/* MAC */
181int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
182int ar9170_init_mac(struct ar9170 *ar);
183int ar9170_set_qos(struct ar9170 *ar);
184int ar9170_update_multicast(struct ar9170 *ar);
185int ar9170_update_frame_filter(struct ar9170 *ar);
186int ar9170_set_operating_mode(struct ar9170 *ar);
187int ar9170_set_beacon_timers(struct ar9170 *ar);
188int ar9170_set_hwretry_limit(struct ar9170 *ar, u32 max_retry);
189int ar9170_update_beacon(struct ar9170 *ar);
190void ar9170_new_beacon(struct work_struct *work);
191int ar9170_upload_key(struct ar9170 *ar, u8 id, const u8 *mac, u8 ktype,
192 u8 keyidx, u8 *keydata, int keylen);
193int ar9170_disable_key(struct ar9170 *ar, u8 id);
194
195/* LEDs */
196#ifdef CONFIG_AR9170_LEDS
197int ar9170_register_leds(struct ar9170 *ar);
198void ar9170_unregister_leds(struct ar9170 *ar);
199#endif /* CONFIG_AR9170_LEDS */
200int ar9170_init_leds(struct ar9170 *ar);
201int ar9170_set_leds_state(struct ar9170 *ar, u32 led_state);
202
203/* PHY / RF */
204int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band);
205int ar9170_init_rf(struct ar9170 *ar);
206int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
207 enum ar9170_rf_init_mode rfi, enum ar9170_bw bw);
208
209#endif /* __AR9170_H */
diff --git a/drivers/net/wireless/ar9170/cmd.c b/drivers/net/wireless/ar9170/cmd.c
new file mode 100644
index 000000000000..f57a6200167b
--- /dev/null
+++ b/drivers/net/wireless/ar9170/cmd.c
@@ -0,0 +1,129 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * Basic HW register/memory/command access functions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#include "ar9170.h"
40#include "cmd.h"
41
42int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len)
43{
44 int err;
45
46 if (unlikely(!IS_ACCEPTING_CMD(ar)))
47 return 0;
48
49 err = ar->exec_cmd(ar, AR9170_CMD_WMEM, len, (u8 *) data, 0, NULL);
50 if (err)
51 printk(KERN_DEBUG "%s: writing memory failed\n",
52 wiphy_name(ar->hw->wiphy));
53 return err;
54}
55
56int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
57{
58 __le32 buf[2] = {
59 cpu_to_le32(reg),
60 cpu_to_le32(val),
61 };
62 int err;
63
64 if (unlikely(!IS_ACCEPTING_CMD(ar)))
65 return 0;
66
67 err = ar->exec_cmd(ar, AR9170_CMD_WREG, sizeof(buf),
68 (u8 *) buf, 0, NULL);
69 if (err)
70 printk(KERN_DEBUG "%s: writing reg %#x (val %#x) failed\n",
71 wiphy_name(ar->hw->wiphy), reg, val);
72 return err;
73}
74
75static int ar9170_read_mreg(struct ar9170 *ar, int nregs,
76 const u32 *regs, u32 *out)
77{
78 int i, err;
79 __le32 *offs, *res;
80
81 if (unlikely(!IS_ACCEPTING_CMD(ar)))
82 return 0;
83
84 /* abuse "out" for the register offsets, must be same length */
85 offs = (__le32 *)out;
86 for (i = 0; i < nregs; i++)
87 offs[i] = cpu_to_le32(regs[i]);
88
89 /* also use the same buffer for the input */
90 res = (__le32 *)out;
91
92 err = ar->exec_cmd(ar, AR9170_CMD_RREG,
93 4 * nregs, (u8 *)offs,
94 4 * nregs, (u8 *)res);
95 if (err)
96 return err;
97
98 /* convert result to cpu endian */
99 for (i = 0; i < nregs; i++)
100 out[i] = le32_to_cpu(res[i]);
101
102 return 0;
103}
104
105int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val)
106{
107 return ar9170_read_mreg(ar, 1, &reg, val);
108}
109
110int ar9170_echo_test(struct ar9170 *ar, u32 v)
111{
112 __le32 echobuf = cpu_to_le32(v);
113 __le32 echores;
114 int err;
115
116 if (unlikely(!IS_ACCEPTING_CMD(ar)))
117 return -ENODEV;
118
119 err = ar->exec_cmd(ar, AR9170_CMD_ECHO,
120 4, (u8 *)&echobuf,
121 4, (u8 *)&echores);
122 if (err)
123 return err;
124
125 if (echobuf != echores)
126 return -EINVAL;
127
128 return 0;
129}
diff --git a/drivers/net/wireless/ar9170/cmd.h b/drivers/net/wireless/ar9170/cmd.h
new file mode 100644
index 000000000000..a4f0e50e52b4
--- /dev/null
+++ b/drivers/net/wireless/ar9170/cmd.h
@@ -0,0 +1,91 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * Basic HW register/memory/command access functions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#ifndef __CMD_H
39#define __CMD_H
40
41#include "ar9170.h"
42
43/* basic HW access */
44int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len);
45int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val);
46int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val);
47int ar9170_echo_test(struct ar9170 *ar, u32 v);
48
49/*
50 * Macros to facilitate writing multiple registers in a single
51 * write-combining USB command. Note that when the first group
52 * fails the whole thing will fail without any others attempted,
53 * but you won't know which write in the group failed.
54 */
55#define ar9170_regwrite_begin(ar) \
56do { \
57 int __nreg = 0, __err = 0; \
58 struct ar9170 *__ar = ar;
59
60#define ar9170_regwrite(r, v) do { \
61 __ar->cmdbuf[2 * __nreg + 1] = cpu_to_le32(r); \
62 __ar->cmdbuf[2 * __nreg + 2] = cpu_to_le32(v); \
63 __nreg++; \
64 if ((__nreg >= PAYLOAD_MAX/2)) { \
65 if (IS_ACCEPTING_CMD(__ar)) \
66 __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
67 8 * __nreg, \
68 (u8 *) &__ar->cmdbuf[1], \
69 0, NULL); \
70 __nreg = 0; \
71 if (__err) \
72 goto __regwrite_out; \
73 } \
74} while (0)
75
76#define ar9170_regwrite_finish() \
77__regwrite_out : \
78 if (__nreg) { \
79 if (IS_ACCEPTING_CMD(__ar)) \
80 __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
81 8 * __nreg, \
82 (u8 *) &__ar->cmdbuf[1], \
83 0, NULL); \
84 __nreg = 0; \
85 }
86
87#define ar9170_regwrite_result() \
88 __err; \
89} while (0);
90
91#endif /* __CMD_H */
diff --git a/drivers/net/wireless/ar9170/eeprom.h b/drivers/net/wireless/ar9170/eeprom.h
new file mode 100644
index 000000000000..d2c8cc83f1dd
--- /dev/null
+++ b/drivers/net/wireless/ar9170/eeprom.h
@@ -0,0 +1,179 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * EEPROM layout
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#ifndef __AR9170_EEPROM_H
39#define __AR9170_EEPROM_H
40
41#define AR5416_MAX_CHAINS 2
42#define AR5416_MODAL_SPURS 5
43
44struct ar9170_eeprom_modal {
45 __le32 antCtrlChain[AR5416_MAX_CHAINS];
46 __le32 antCtrlCommon;
47 s8 antennaGainCh[AR5416_MAX_CHAINS];
48 u8 switchSettling;
49 u8 txRxAttenCh[AR5416_MAX_CHAINS];
50 u8 rxTxMarginCh[AR5416_MAX_CHAINS];
51 s8 adcDesiredSize;
52 s8 pgaDesiredSize;
53 u8 xlnaGainCh[AR5416_MAX_CHAINS];
54 u8 txEndToXpaOff;
55 u8 txEndToRxOn;
56 u8 txFrameToXpaOn;
57 u8 thresh62;
58 s8 noiseFloorThreshCh[AR5416_MAX_CHAINS];
59 u8 xpdGain;
60 u8 xpd;
61 s8 iqCalICh[AR5416_MAX_CHAINS];
62 s8 iqCalQCh[AR5416_MAX_CHAINS];
63 u8 pdGainOverlap;
64 u8 ob;
65 u8 db;
66 u8 xpaBiasLvl;
67 u8 pwrDecreaseFor2Chain;
68 u8 pwrDecreaseFor3Chain;
69 u8 txFrameToDataStart;
70 u8 txFrameToPaOn;
71 u8 ht40PowerIncForPdadc;
72 u8 bswAtten[AR5416_MAX_CHAINS];
73 u8 bswMargin[AR5416_MAX_CHAINS];
74 u8 swSettleHt40;
75 u8 reserved[22];
76 struct spur_channel {
77 __le16 spurChan;
78 u8 spurRangeLow;
79 u8 spurRangeHigh;
80 } __packed spur_channels[AR5416_MODAL_SPURS];
81} __packed;
82
83#define AR5416_NUM_PD_GAINS 4
84#define AR5416_PD_GAIN_ICEPTS 5
85
86struct ar9170_calibration_data_per_freq {
87 u8 pwr_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
88 u8 vpd_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
89} __packed;
90
91#define AR5416_NUM_5G_CAL_PIERS 8
92#define AR5416_NUM_2G_CAL_PIERS 4
93
94#define AR5416_NUM_5G_TARGET_PWRS 8
95#define AR5416_NUM_2G_CCK_TARGET_PWRS 3
96#define AR5416_NUM_2G_OFDM_TARGET_PWRS 4
97#define AR5416_MAX_NUM_TGT_PWRS 8
98
99struct ar9170_calibration_target_power_legacy {
100 u8 freq;
101 u8 power[4];
102} __packed;
103
104struct ar9170_calibration_target_power_ht {
105 u8 freq;
106 u8 power[8];
107} __packed;
108
109#define AR5416_NUM_CTLS 24
110
111struct ar9170_calctl_edges {
112 u8 channel;
113#define AR9170_CALCTL_EDGE_FLAGS 0xC0
114 u8 power_flags;
115} __packed;
116
117#define AR5416_NUM_BAND_EDGES 8
118
119struct ar9170_calctl_data {
120 struct ar9170_calctl_edges
121 control_edges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES];
122} __packed;
123
124
125struct ar9170_eeprom {
126 __le16 length;
127 __le16 checksum;
128 __le16 version;
129 u8 operating_flags;
130#define AR9170_OPFLAG_5GHZ 1
131#define AR9170_OPFLAG_2GHZ 2
132 u8 misc;
133 __le16 reg_domain[2];
134 u8 mac_address[6];
135 u8 rx_mask;
136 u8 tx_mask;
137 __le16 rf_silent;
138 __le16 bluetooth_options;
139 __le16 device_capabilities;
140 __le32 build_number;
141 u8 deviceType;
142 u8 reserved[33];
143
144 u8 customer_data[64];
145
146 struct ar9170_eeprom_modal
147 modal_header[2];
148
149 u8 cal_freq_pier_5G[AR5416_NUM_5G_CAL_PIERS];
150 u8 cal_freq_pier_2G[AR5416_NUM_2G_CAL_PIERS];
151
152 struct ar9170_calibration_data_per_freq
153 cal_pier_data_5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS],
154 cal_pier_data_2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS];
155
156 /* power calibration data */
157 struct ar9170_calibration_target_power_legacy
158 cal_tgt_pwr_5G[AR5416_NUM_5G_TARGET_PWRS];
159 struct ar9170_calibration_target_power_ht
160 cal_tgt_pwr_5G_ht20[AR5416_NUM_5G_TARGET_PWRS],
161 cal_tgt_pwr_5G_ht40[AR5416_NUM_5G_TARGET_PWRS];
162
163 struct ar9170_calibration_target_power_legacy
164 cal_tgt_pwr_2G_cck[AR5416_NUM_2G_CCK_TARGET_PWRS],
165 cal_tgt_pwr_2G_ofdm[AR5416_NUM_2G_OFDM_TARGET_PWRS];
166 struct ar9170_calibration_target_power_ht
167 cal_tgt_pwr_2G_ht20[AR5416_NUM_2G_OFDM_TARGET_PWRS],
168 cal_tgt_pwr_2G_ht40[AR5416_NUM_2G_OFDM_TARGET_PWRS];
169
170 /* conformance testing limits */
171 u8 ctl_index[AR5416_NUM_CTLS];
172 struct ar9170_calctl_data
173 ctl_data[AR5416_NUM_CTLS];
174
175 u8 pad;
176 __le16 subsystem_id;
177} __packed;
178
179#endif /* __AR9170_EEPROM_H */
diff --git a/drivers/net/wireless/ar9170/hw.h b/drivers/net/wireless/ar9170/hw.h
new file mode 100644
index 000000000000..13091bd9d815
--- /dev/null
+++ b/drivers/net/wireless/ar9170/hw.h
@@ -0,0 +1,417 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * Hardware-specific definitions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#ifndef __AR9170_HW_H
39#define __AR9170_HW_H
40
41#define AR9170_MAX_CMD_LEN 64
42
/*
 * Firmware command opcodes (first byte of a command block sent on the
 * AR9170_EP_CMD endpoint).
 * NOTE(review): WREEPROM and WFLASH share opcode 0xB0 — presumably the
 * firmware selects the backing store; confirm before relying on either.
 */
enum ar9170_cmd {
	AR9170_CMD_RREG		= 0x00,	/* read register(s) */
	AR9170_CMD_WREG		= 0x01,	/* write register(s) */
	AR9170_CMD_RMEM		= 0x02,
	AR9170_CMD_WMEM		= 0x03,
	AR9170_CMD_BITAND	= 0x04,
	AR9170_CMD_BITOR	= 0x05,
	AR9170_CMD_EKEY		= 0x28,	/* enable (install) crypto key */
	AR9170_CMD_DKEY		= 0x29,	/* disable crypto key */
	AR9170_CMD_FREQUENCY	= 0x30,
	AR9170_CMD_RF_INIT	= 0x31,
	AR9170_CMD_SYNTH	= 0x32,
	AR9170_CMD_FREQ_START	= 0x33,
	AR9170_CMD_ECHO		= 0x80,
	AR9170_CMD_TALLY	= 0x81,
	AR9170_CMD_TALLY_APD	= 0x82,
	AR9170_CMD_CONFIG	= 0x83,
	AR9170_CMD_RESET	= 0x90,
	AR9170_CMD_DKRESET	= 0x91,
	AR9170_CMD_DKTX_STATUS	= 0x92,
	AR9170_CMD_FDC		= 0xA0,
	AR9170_CMD_WREEPROM	= 0xB0,
	AR9170_CMD_WFLASH	= 0xB0,
	AR9170_CMD_FLASH_ERASE	= 0xB1,
	AR9170_CMD_FLASH_PROG	= 0xB2,
	AR9170_CMD_FLASH_CHKSUM	= 0xB3,
	AR9170_CMD_FLASH_READ	= 0xB4,
	AR9170_CMD_FW_DL_INIT	= 0xB5,
	AR9170_CMD_MEM_WREEPROM	= 0xBB,
};
73
74/* endpoints */
75#define AR9170_EP_TX 1
76#define AR9170_EP_RX 2
77#define AR9170_EP_IRQ 3
78#define AR9170_EP_CMD 4
79
80#define AR9170_EEPROM_START 0x1600
81
82#define AR9170_GPIO_REG_BASE 0x1d0100
83#define AR9170_GPIO_REG_PORT_TYPE AR9170_GPIO_REG_BASE
84#define AR9170_GPIO_REG_DATA (AR9170_GPIO_REG_BASE + 4)
85#define AR9170_NUM_LEDS 2
86
87
88#define AR9170_USB_REG_BASE 0x1e1000
89#define AR9170_USB_REG_DMA_CTL (AR9170_USB_REG_BASE + 0x108)
90#define AR9170_DMA_CTL_ENABLE_TO_DEVICE 0x1
91#define AR9170_DMA_CTL_ENABLE_FROM_DEVICE 0x2
92#define AR9170_DMA_CTL_HIGH_SPEED 0x4
93#define AR9170_DMA_CTL_PACKET_MODE 0x8
94
95#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110)
96#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114)
97
98
99
100#define AR9170_MAC_REG_BASE 0x1c3000
101
102#define AR9170_MAC_REG_TSF_L (AR9170_MAC_REG_BASE + 0x514)
103#define AR9170_MAC_REG_TSF_H (AR9170_MAC_REG_BASE + 0x518)
104
105#define AR9170_MAC_REG_ATIM_WINDOW (AR9170_MAC_REG_BASE + 0x51C)
106#define AR9170_MAC_REG_BCN_PERIOD (AR9170_MAC_REG_BASE + 0x520)
107#define AR9170_MAC_REG_PRETBTT (AR9170_MAC_REG_BASE + 0x524)
108
109#define AR9170_MAC_REG_MAC_ADDR_L (AR9170_MAC_REG_BASE + 0x610)
110#define AR9170_MAC_REG_MAC_ADDR_H (AR9170_MAC_REG_BASE + 0x614)
111#define AR9170_MAC_REG_BSSID_L (AR9170_MAC_REG_BASE + 0x618)
112#define AR9170_MAC_REG_BSSID_H (AR9170_MAC_REG_BASE + 0x61c)
113
114#define AR9170_MAC_REG_GROUP_HASH_TBL_L (AR9170_MAC_REG_BASE + 0x624)
115#define AR9170_MAC_REG_GROUP_HASH_TBL_H (AR9170_MAC_REG_BASE + 0x628)
116
117#define AR9170_MAC_REG_RX_TIMEOUT (AR9170_MAC_REG_BASE + 0x62C)
118
119#define AR9170_MAC_REG_BASIC_RATE (AR9170_MAC_REG_BASE + 0x630)
120#define AR9170_MAC_REG_MANDATORY_RATE (AR9170_MAC_REG_BASE + 0x634)
121#define AR9170_MAC_REG_RTS_CTS_RATE (AR9170_MAC_REG_BASE + 0x638)
122#define AR9170_MAC_REG_BACKOFF_PROTECT (AR9170_MAC_REG_BASE + 0x63c)
123#define AR9170_MAC_REG_RX_THRESHOLD (AR9170_MAC_REG_BASE + 0x640)
124#define AR9170_MAC_REG_RX_PE_DELAY (AR9170_MAC_REG_BASE + 0x64C)
125
126#define AR9170_MAC_REG_DYNAMIC_SIFS_ACK (AR9170_MAC_REG_BASE + 0x658)
127#define AR9170_MAC_REG_SNIFFER (AR9170_MAC_REG_BASE + 0x674)
128#define AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC BIT(0)
129#define AR9170_MAC_REG_SNIFFER_DEFAULTS 0x02000000
130#define AR9170_MAC_REG_ENCRYPTION (AR9170_MAC_REG_BASE + 0x678)
131#define AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE BIT(3)
132#define AR9170_MAC_REG_ENCRYPTION_DEFAULTS 0x70
133
134#define AR9170_MAC_REG_MISC_680 (AR9170_MAC_REG_BASE + 0x680)
135#define AR9170_MAC_REG_TX_UNDERRUN (AR9170_MAC_REG_BASE + 0x688)
136
137#define AR9170_MAC_REG_FRAMETYPE_FILTER (AR9170_MAC_REG_BASE + 0x68c)
138#define AR9170_MAC_REG_FTF_ASSOC_REQ BIT(0)
139#define AR9170_MAC_REG_FTF_ASSOC_RESP BIT(1)
140#define AR9170_MAC_REG_FTF_REASSOC_REQ BIT(2)
141#define AR9170_MAC_REG_FTF_REASSOC_RESP BIT(3)
142#define AR9170_MAC_REG_FTF_PRB_REQ BIT(4)
143#define AR9170_MAC_REG_FTF_PRB_RESP BIT(5)
144#define AR9170_MAC_REG_FTF_BIT6 BIT(6)
145#define AR9170_MAC_REG_FTF_BIT7 BIT(7)
146#define AR9170_MAC_REG_FTF_BEACON BIT(8)
147#define AR9170_MAC_REG_FTF_ATIM BIT(9)
148#define AR9170_MAC_REG_FTF_DEASSOC BIT(10)
149#define AR9170_MAC_REG_FTF_AUTH BIT(11)
150#define AR9170_MAC_REG_FTF_DEAUTH BIT(12)
151#define AR9170_MAC_REG_FTF_BIT13 BIT(13)
152#define AR9170_MAC_REG_FTF_BIT14 BIT(14)
153#define AR9170_MAC_REG_FTF_BIT15 BIT(15)
154#define AR9170_MAC_REG_FTF_BAR BIT(24)
155#define AR9170_MAC_REG_FTF_BIT25 BIT(25)
156#define AR9170_MAC_REG_FTF_PSPOLL BIT(26)
157#define AR9170_MAC_REG_FTF_RTS BIT(27)
158#define AR9170_MAC_REG_FTF_CTS BIT(28)
159#define AR9170_MAC_REG_FTF_ACK BIT(29)
160#define AR9170_MAC_REG_FTF_CFE BIT(30)
161#define AR9170_MAC_REG_FTF_CFE_ACK BIT(31)
162#define AR9170_MAC_REG_FTF_DEFAULTS 0x0500ffff
163#define AR9170_MAC_REG_FTF_MONITOR 0xfd00ffff
164
165#define AR9170_MAC_REG_RX_TOTAL (AR9170_MAC_REG_BASE + 0x6A0)
166#define AR9170_MAC_REG_RX_CRC32 (AR9170_MAC_REG_BASE + 0x6A4)
167#define AR9170_MAC_REG_RX_CRC16 (AR9170_MAC_REG_BASE + 0x6A8)
168#define AR9170_MAC_REG_RX_ERR_DECRYPTION_UNI (AR9170_MAC_REG_BASE + 0x6AC)
169#define AR9170_MAC_REG_RX_OVERRUN (AR9170_MAC_REG_BASE + 0x6B0)
170#define AR9170_MAC_REG_RX_ERR_DECRYPTION_MUL (AR9170_MAC_REG_BASE + 0x6BC)
171#define AR9170_MAC_REG_TX_RETRY (AR9170_MAC_REG_BASE + 0x6CC)
172#define AR9170_MAC_REG_TX_TOTAL (AR9170_MAC_REG_BASE + 0x6F4)
173
174
175#define AR9170_MAC_REG_ACK_EXTENSION (AR9170_MAC_REG_BASE + 0x690)
176#define AR9170_MAC_REG_EIFS_AND_SIFS (AR9170_MAC_REG_BASE + 0x698)
177
178#define AR9170_MAC_REG_SLOT_TIME (AR9170_MAC_REG_BASE + 0x6F0)
179
180#define AR9170_MAC_REG_POWERMANAGEMENT (AR9170_MAC_REG_BASE + 0x700)
181#define AR9170_MAC_REG_POWERMGT_IBSS 0xe0
182#define AR9170_MAC_REG_POWERMGT_AP 0xa1
183#define AR9170_MAC_REG_POWERMGT_STA 0x2
184#define AR9170_MAC_REG_POWERMGT_AP_WDS 0x3
185#define AR9170_MAC_REG_POWERMGT_DEFAULTS (0xf << 24)
186
187#define AR9170_MAC_REG_ROLL_CALL_TBL_L (AR9170_MAC_REG_BASE + 0x704)
188#define AR9170_MAC_REG_ROLL_CALL_TBL_H (AR9170_MAC_REG_BASE + 0x708)
189
190#define AR9170_MAC_REG_AC0_CW (AR9170_MAC_REG_BASE + 0xB00)
191#define AR9170_MAC_REG_AC1_CW (AR9170_MAC_REG_BASE + 0xB04)
192#define AR9170_MAC_REG_AC2_CW (AR9170_MAC_REG_BASE + 0xB08)
193#define AR9170_MAC_REG_AC3_CW (AR9170_MAC_REG_BASE + 0xB0C)
194#define AR9170_MAC_REG_AC4_CW (AR9170_MAC_REG_BASE + 0xB10)
195#define AR9170_MAC_REG_AC1_AC0_AIFS (AR9170_MAC_REG_BASE + 0xB14)
196#define AR9170_MAC_REG_AC3_AC2_AIFS (AR9170_MAC_REG_BASE + 0xB18)
197
198#define AR9170_MAC_REG_RETRY_MAX (AR9170_MAC_REG_BASE + 0xB28)
199
200#define AR9170_MAC_REG_FCS_SELECT (AR9170_MAC_REG_BASE + 0xBB0)
201#define AR9170_MAC_FCS_SWFCS 0x1
202#define AR9170_MAC_FCS_FIFO_PROT 0x4
203
204
205#define AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND (AR9170_MAC_REG_BASE + 0xB30)
206
207#define AR9170_MAC_REG_AC1_AC0_TXOP (AR9170_MAC_REG_BASE + 0xB44)
208#define AR9170_MAC_REG_AC3_AC2_TXOP (AR9170_MAC_REG_BASE + 0xB48)
209
210#define AR9170_MAC_REG_ACK_TABLE (AR9170_MAC_REG_BASE + 0xC00)
211#define AR9170_MAC_REG_AMPDU_RX_THRESH (AR9170_MAC_REG_BASE + 0xC50)
212
213#define AR9170_MAC_REG_TXRX_MPI (AR9170_MAC_REG_BASE + 0xD7C)
214#define AR9170_MAC_TXRX_MPI_TX_MPI_MASK 0x0000000f
215#define AR9170_MAC_TXRX_MPI_TX_TO_MASK 0x0000fff0
216#define AR9170_MAC_TXRX_MPI_RX_MPI_MASK 0x000f0000
217#define AR9170_MAC_TXRX_MPI_RX_TO_MASK 0xfff00000
218
219#define AR9170_MAC_REG_BCN_ADDR (AR9170_MAC_REG_BASE + 0xD84)
220#define AR9170_MAC_REG_BCN_LENGTH (AR9170_MAC_REG_BASE + 0xD88)
221#define AR9170_MAC_REG_BCN_PLCP (AR9170_MAC_REG_BASE + 0xD90)
222#define AR9170_MAC_REG_BCN_CTRL (AR9170_MAC_REG_BASE + 0xD94)
223#define AR9170_MAC_REG_BCN_HT1 (AR9170_MAC_REG_BASE + 0xDA0)
224#define AR9170_MAC_REG_BCN_HT2 (AR9170_MAC_REG_BASE + 0xDA4)
225
226
227#define AR9170_PWR_REG_BASE 0x1D4000
228
229#define AR9170_PWR_REG_CLOCK_SEL (AR9170_PWR_REG_BASE + 0x008)
230#define AR9170_PWR_CLK_AHB_40MHZ 0
231#define AR9170_PWR_CLK_AHB_20_22MHZ 1
232#define AR9170_PWR_CLK_AHB_40_44MHZ 2
233#define AR9170_PWR_CLK_AHB_80_88MHZ 3
234#define AR9170_PWR_CLK_DAC_160_INV_DLY 0x70
235
236
237/* put beacon here in memory */
238#define AR9170_BEACON_BUFFER_ADDRESS 0x117900
239
240
/*
 * Hardware TX descriptor prepended to every outgoing frame.
 * mac_control takes the AR9170_TX_MAC_* flags and phy_control the
 * AR9170_TX_PHY_* flags defined below; frame_data is the 802.11 frame.
 */
struct ar9170_tx_control {
	__le16 length;
	__le16 mac_control;
	__le32 phy_control;
	u8 frame_data[0];	/* variable-length trailer */
} __packed;
247
248/* these are either-or */
249#define AR9170_TX_MAC_PROT_RTS 0x0001
250#define AR9170_TX_MAC_PROT_CTS 0x0002
251
252#define AR9170_TX_MAC_NO_ACK 0x0004
253/* if unset, MAC will only do SIFS space before frame */
254#define AR9170_TX_MAC_BACKOFF 0x0008
255#define AR9170_TX_MAC_BURST 0x0010
256#define AR9170_TX_MAC_AGGR 0x0020
257
258/* encryption is a two-bit field */
259#define AR9170_TX_MAC_ENCR_NONE 0x0000
260#define AR9170_TX_MAC_ENCR_RC4 0x0040
261#define AR9170_TX_MAC_ENCR_CENC 0x0080
262#define AR9170_TX_MAC_ENCR_AES 0x00c0
263
264#define AR9170_TX_MAC_MMIC 0x0100
265#define AR9170_TX_MAC_HW_DURATION 0x0200
266#define AR9170_TX_MAC_QOS_SHIFT 10
267#define AR9170_TX_MAC_QOS_MASK (3 << AR9170_TX_MAC_QOS_SHIFT)
268#define AR9170_TX_MAC_AGGR_QOS_BIT1 0x0400
269#define AR9170_TX_MAC_AGGR_QOS_BIT2 0x0800
270#define AR9170_TX_MAC_DISABLE_TXOP 0x1000
271#define AR9170_TX_MAC_TXOP_RIFS 0x2000
272#define AR9170_TX_MAC_IMM_AMPDU 0x4000
273#define AR9170_TX_MAC_RATE_PROBE 0x8000
274
275/* either-or */
276#define AR9170_TX_PHY_MOD_CCK 0x00000000
277#define AR9170_TX_PHY_MOD_OFDM 0x00000001
278#define AR9170_TX_PHY_MOD_HT 0x00000002
279
280/* depends on modulation */
281#define AR9170_TX_PHY_SHORT_PREAMBLE 0x00000004
282#define AR9170_TX_PHY_GREENFIELD 0x00000004
283
284#define AR9170_TX_PHY_BW_SHIFT 3
285#define AR9170_TX_PHY_BW_MASK (3 << AR9170_TX_PHY_BW_SHIFT)
286#define AR9170_TX_PHY_BW_20MHZ 0
287#define AR9170_TX_PHY_BW_40MHZ 2
288#define AR9170_TX_PHY_BW_40MHZ_DUP 3
289
290#define AR9170_TX_PHY_TX_HEAVY_CLIP_SHIFT 6
291#define AR9170_TX_PHY_TX_HEAVY_CLIP_MASK (7 << AR9170_TX_PHY_TX_HEAVY_CLIP_SHIFT)
292
293#define AR9170_TX_PHY_TX_PWR_SHIFT 9
294#define AR9170_TX_PHY_TX_PWR_MASK (0x3f << AR9170_TX_PHY_TX_PWR_SHIFT)
295
296/* not part of the hw-spec */
297#define AR9170_TX_PHY_QOS_SHIFT 25
298#define AR9170_TX_PHY_QOS_MASK (3 << AR9170_TX_PHY_QOS_SHIFT)
299
300#define AR9170_TX_PHY_TXCHAIN_SHIFT 15
301#define AR9170_TX_PHY_TXCHAIN_MASK (7 << AR9170_TX_PHY_TXCHAIN_SHIFT)
302#define AR9170_TX_PHY_TXCHAIN_1 1
303/* use for cck, ofdm 6/9/12/18/24 and HT if capable */
304#define AR9170_TX_PHY_TXCHAIN_2 5
305
306#define AR9170_TX_PHY_MCS_SHIFT 18
307#define AR9170_TX_PHY_MCS_MASK (0x7f << AR9170_TX_PHY_MCS_SHIFT)
308
309#define AR9170_TX_PHY_SHORT_GI 0x80000000
310
/* RX preamble: raw 12-byte PLCP header delivered ahead of the frame. */
struct ar9170_rx_head {
	u8 plcp[12];
};
314
/*
 * RX status trailer appended to each received frame.
 * The anonymous union lets the per-antenna RSSI values be accessed
 * either by name or as the rssi[7] array.
 */
struct ar9170_rx_tail {
	union {
		struct {
			u8 rssi_ant0, rssi_ant1, rssi_ant2,
			   rssi_ant0x, rssi_ant1x, rssi_ant2x,
			   rssi_combined;
		};
		u8 rssi[7];
	};

	u8 evm_stream0[6], evm_stream1[6];	/* EVM per HT stream */
	u8 phy_err;
	/* top two bits of SAidx/DAidx encode the cipher type,
	 * see ar9170_get_decrypt_type() */
	u8 SAidx, DAidx;
	u8 error;	/* AR9170_RX_ERROR_* bits */
	u8 status;	/* AR9170_RX_STATUS_* bits */
};
331
332#define AR9170_ENC_ALG_NONE 0x0
333#define AR9170_ENC_ALG_WEP64 0x1
334#define AR9170_ENC_ALG_TKIP 0x2
335#define AR9170_ENC_ALG_AESCCMP 0x4
336#define AR9170_ENC_ALG_WEP128 0x5
337#define AR9170_ENC_ALG_WEP256 0x6
338#define AR9170_ENC_ALG_CENC 0x7
339
340#define AR9170_RX_ENC_SOFTWARE 0x8
341
342static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_tail *t)
343{
344 return (t->SAidx & 0xc0) >> 4 |
345 (t->DAidx & 0xc0) >> 6;
346}
347
348#define AR9170_RX_STATUS_MODULATION_MASK 0x03
349#define AR9170_RX_STATUS_MODULATION_CCK 0x00
350#define AR9170_RX_STATUS_MODULATION_OFDM 0x01
351#define AR9170_RX_STATUS_MODULATION_HT 0x02
352#define AR9170_RX_STATUS_MODULATION_DUPOFDM 0x03
353
354/* depends on modulation */
355#define AR9170_RX_STATUS_SHORT_PREAMBLE 0x08
356#define AR9170_RX_STATUS_GREENFIELD 0x08
357
358#define AR9170_RX_STATUS_MPDU_MASK 0x30
359#define AR9170_RX_STATUS_MPDU_SINGLE 0x00
360#define AR9170_RX_STATUS_MPDU_FIRST 0x10
361#define AR9170_RX_STATUS_MPDU_MIDDLE 0x20
362#define AR9170_RX_STATUS_MPDU_LAST 0x30
363
364
365#define AR9170_RX_ERROR_RXTO 0x01
366#define AR9170_RX_ERROR_OVERRUN 0x02
367#define AR9170_RX_ERROR_DECRYPT 0x04
368#define AR9170_RX_ERROR_FCS 0x08
369#define AR9170_RX_ERROR_WRONG_RA 0x10
370#define AR9170_RX_ERROR_PLCP 0x20
371#define AR9170_RX_ERROR_MMIC 0x40
372
/*
 * Firmware TX-status notification; 'status' holds one of the
 * AR9170_TX_STATUS_* codes below.
 */
struct ar9170_cmd_tx_status {
	__le16 unkn;		/* NOTE(review): meaning unknown */
	u8 dst[ETH_ALEN];	/* destination address of the frame */
	__le32 rate;
	__le16 status;
} __packed;
379
380#define AR9170_TX_STATUS_COMPLETE 0x00
381#define AR9170_TX_STATUS_RETRY 0x01
382#define AR9170_TX_STATUS_FAILED 0x02
383
/* Firmware block-ack failure report: failed count at a given rate. */
struct ar9170_cmd_ba_failed_count {
	__le16 failed;
	__le16 rate;
} __packed;
388
/*
 * Generic firmware response/event header; 'type' selects which union
 * member (or raw data) follows.
 */
struct ar9170_cmd_response {
	u8 flag;
	u8 type;

	union {
		struct ar9170_cmd_tx_status		tx_status;
		struct ar9170_cmd_ba_failed_count	ba_fail_cnt;
		u8 data[0];	/* raw payload */
	};
} __packed;
399
400/* QoS */
401
/* mac80211 queue number -> HW/FW queue (enum ar9170_txq) map */
static const u8 ar9170_qos_hwmap[4] = { 3, 2, 0, 1 };
404
/* HW/FW queue (enum ar9170_txq) -> mac80211 queue number map */
static const u8 ar9170_qos_mac80211map[4] = { 2, 3, 1, 0 };
407
/* hardware/firmware TX queue indices (802.11e access categories) */
enum ar9170_txq {
	AR9170_TXQ_BE,		/* best effort */
	AR9170_TXQ_BK,		/* background */
	AR9170_TXQ_VI,		/* video */
	AR9170_TXQ_VO,		/* voice */

	__AR9170_NUM_TXQ,
};
416
417#endif /* __AR9170_HW_H */
diff --git a/drivers/net/wireless/ar9170/led.c b/drivers/net/wireless/ar9170/led.c
new file mode 100644
index 000000000000..341cead7f606
--- /dev/null
+++ b/drivers/net/wireless/ar9170/led.c
@@ -0,0 +1,171 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * LED handling
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#include "ar9170.h"
40#include "cmd.h"
41
/*
 * Write the raw LED on/off bitmap to the GPIO data register
 * (bit i drives LED i). Returns 0 or a negative error.
 */
int ar9170_set_leds_state(struct ar9170 *ar, u32 led_state)
{
	return ar9170_write_reg(ar, AR9170_GPIO_REG_DATA, led_state);
}
46
47int ar9170_init_leds(struct ar9170 *ar)
48{
49 int err;
50
51 /* disable LEDs */
52 /* GPIO [0/1 mode: output, 2/3: input] */
53 err = ar9170_write_reg(ar, AR9170_GPIO_REG_PORT_TYPE, 3);
54 if (err)
55 goto out;
56
57 /* GPIO 0/1 value: off */
58 err = ar9170_set_leds_state(ar, 0);
59
60out:
61 return err;
62}
63
64#ifdef CONFIG_AR9170_LEDS
/*
 * Delayed-work handler driving LED blinking: turns on every LED whose
 * 'toggled' counter is set, derives the shortest blink delay from the
 * toggle rates, and re-queues itself while any LED is still active.
 */
static void ar9170_update_leds(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170, led_work.work);
	int i, tmp, blink_delay = 1000;	/* upper bound in ms */
	u32 led_val = 0;
	bool rerun = false;

	/* device gone or not ready for commands: nothing to do */
	if (unlikely(!IS_ACCEPTING_CMD(ar)))
		return ;

	mutex_lock(&ar->mutex);
	for (i = 0; i < AR9170_NUM_LEDS; i++)
		if (ar->leds[i].toggled) {
			led_val |= 1 << i;

			/* faster toggling -> shorter blink delay,
			 * bounded below by 70ms */
			tmp = 70 + 200 / (ar->leds[i].toggled);
			if (tmp < blink_delay)
				blink_delay = tmp;

			/* consume the accumulated toggles */
			if (ar->leds[i].toggled > 1)
				ar->leds[i].toggled = 0;

			rerun = true;
		}

	ar9170_set_leds_state(ar, led_val);
	mutex_unlock(&ar->mutex);

	if (rerun)
		queue_delayed_work(ar->hw->workqueue, &ar->led_work,
				   msecs_to_jiffies(blink_delay));
}
97
/*
 * LED-class brightness callback: bump the toggle counter and kick the
 * blink worker. May run in atomic context, so only schedules work.
 * NOTE(review): 'toggled' is incremented even when brightness == 0
 * (LED off) — the worker only runs for non-zero brightness; confirm
 * this is intended.
 */
static void ar9170_led_brightness_set(struct led_classdev *led,
				      enum led_brightness brightness)
{
	struct ar9170_led *arl = container_of(led, struct ar9170_led, l);
	struct ar9170 *ar = arl->ar;

	arl->toggled++;

	if (likely(IS_ACCEPTING_CMD(ar) && brightness))
		queue_delayed_work(ar->hw->workqueue, &ar->led_work, HZ/10);
}
109
/*
 * Register LED slot 'i' with the LED class, named
 * "ar9170-<phy>::<name>" and bound to the given default trigger.
 * Marks the slot as registered on success; returns 0 or the
 * led_classdev_register() error.
 */
static int ar9170_register_led(struct ar9170 *ar, int i, char *name,
			       char *trigger)
{
	int err;

	snprintf(ar->leds[i].name, sizeof(ar->leds[i].name),
		 "ar9170-%s::%s", wiphy_name(ar->hw->wiphy), name);

	ar->leds[i].ar = ar;
	ar->leds[i].l.name = ar->leds[i].name;
	ar->leds[i].l.brightness_set = ar9170_led_brightness_set;
	ar->leds[i].l.brightness = 0;
	ar->leds[i].l.default_trigger = trigger;

	err = led_classdev_register(wiphy_dev(ar->hw->wiphy),
				    &ar->leds[i].l);
	if (err)
		printk(KERN_ERR "%s: failed to register %s LED (%d).\n",
		       wiphy_name(ar->hw->wiphy), ar->leds[i].name, err);
	else
		ar->leds[i].registered = true;

	return err;
}
134
135void ar9170_unregister_leds(struct ar9170 *ar)
136{
137 int i;
138
139 cancel_delayed_work_sync(&ar->led_work);
140
141 for (i = 0; i < AR9170_NUM_LEDS; i++)
142 if (ar->leds[i].registered) {
143 led_classdev_unregister(&ar->leds[i].l);
144 ar->leds[i].registered = false;
145 }
146}
147
/*
 * Set up the blink worker and register the TX and association LEDs
 * with mac80211's LED triggers. On any failure the already-registered
 * LEDs are rolled back. Returns 0 or a negative error.
 */
int ar9170_register_leds(struct ar9170 *ar)
{
	int err;

	INIT_DELAYED_WORK(&ar->led_work, ar9170_update_leds);

	err = ar9170_register_led(ar, 0, "tx",
				  ieee80211_get_tx_led_name(ar->hw));
	if (err)
		goto fail;

	err = ar9170_register_led(ar, 1, "assoc",
				  ieee80211_get_assoc_led_name(ar->hw));
	if (err)
		goto fail;

	return 0;

fail:
	ar9170_unregister_leds(ar);
	return err;
}
170
171#endif /* CONFIG_AR9170_LEDS */
diff --git a/drivers/net/wireless/ar9170/mac.c b/drivers/net/wireless/ar9170/mac.c
new file mode 100644
index 000000000000..c8fa3073169f
--- /dev/null
+++ b/drivers/net/wireless/ar9170/mac.c
@@ -0,0 +1,452 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * MAC programming
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#include "ar9170.h"
39#include "cmd.h"
40
41int ar9170_set_qos(struct ar9170 *ar)
42{
43 ar9170_regwrite_begin(ar);
44
45 ar9170_regwrite(AR9170_MAC_REG_AC0_CW, ar->edcf[0].cw_min |
46 (ar->edcf[0].cw_max << 16));
47 ar9170_regwrite(AR9170_MAC_REG_AC1_CW, ar->edcf[1].cw_min |
48 (ar->edcf[1].cw_max << 16));
49 ar9170_regwrite(AR9170_MAC_REG_AC2_CW, ar->edcf[2].cw_min |
50 (ar->edcf[2].cw_max << 16));
51 ar9170_regwrite(AR9170_MAC_REG_AC3_CW, ar->edcf[3].cw_min |
52 (ar->edcf[3].cw_max << 16));
53 ar9170_regwrite(AR9170_MAC_REG_AC4_CW, ar->edcf[4].cw_min |
54 (ar->edcf[4].cw_max << 16));
55
56 ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_AIFS,
57 ((ar->edcf[0].aifs * 9 + 10)) |
58 ((ar->edcf[1].aifs * 9 + 10) << 12) |
59 ((ar->edcf[2].aifs * 9 + 10) << 24));
60 ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_AIFS,
61 ((ar->edcf[2].aifs * 9 + 10) >> 8) |
62 ((ar->edcf[3].aifs * 9 + 10) << 4) |
63 ((ar->edcf[4].aifs * 9 + 10) << 16));
64
65 ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP,
66 ar->edcf[0].txop | ar->edcf[1].txop << 16);
67 ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP,
68 ar->edcf[1].txop | ar->edcf[3].txop << 16);
69
70 ar9170_regwrite_finish();
71
72 return ar9170_regwrite_result();
73}
74
/*
 * One-time MAC/PMU bring-up: program fixed defaults for rate sets,
 * filters, timing and clocks. Several writes go to raw, undocumented
 * 0x1c3xxx addresses taken from the vendor (Otus) driver — do not
 * reorder them without hardware documentation.
 * Returns 0 or a negative error from the register write batch.
 */
int ar9170_init_mac(struct ar9170 *ar)
{
	ar9170_regwrite_begin(ar);

	ar9170_regwrite(AR9170_MAC_REG_ACK_EXTENSION, 0x40);

	/* hardware retries off; the driver sets its own limit later
	 * via ar9170_set_hwretry_limit() */
	ar9170_regwrite(AR9170_MAC_REG_RETRY_MAX, 0);

	/* enable MMIC */
	ar9170_regwrite(AR9170_MAC_REG_SNIFFER,
			AR9170_MAC_REG_SNIFFER_DEFAULTS);

	ar9170_regwrite(AR9170_MAC_REG_RX_THRESHOLD, 0xc1f80);

	ar9170_regwrite(AR9170_MAC_REG_RX_PE_DELAY, 0x70);
	ar9170_regwrite(AR9170_MAC_REG_EIFS_AND_SIFS, 0xa144000);
	ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, 9 << 10);

	/* CF-END mode */
	ar9170_regwrite(0x1c3b2c, 0x19000000);

	/* NAV protects ACK only (in TXOP) */
	ar9170_regwrite(0x1c3b38, 0x201);

	/* Set Beacon PHY CTRL's TPC to 0x7, TA1=1 */
	/* OTUS set AM to 0x1 */
	ar9170_regwrite(AR9170_MAC_REG_BCN_HT1, 0x8000170);

	ar9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105);

	/* AGG test code*/
	/* Aggregation MAX number and timeout */
	ar9170_regwrite(0x1c3b9c, 0x10000a);

	ar9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
			AR9170_MAC_REG_FTF_DEFAULTS);

	/* Enable deaggregator, response in sniffer mode */
	ar9170_regwrite(0x1c3c40, 0x1 | 1<<30);

	/* rate sets */
	ar9170_regwrite(AR9170_MAC_REG_BASIC_RATE, 0x150f);
	ar9170_regwrite(AR9170_MAC_REG_MANDATORY_RATE, 0x150f);
	ar9170_regwrite(AR9170_MAC_REG_RTS_CTS_RATE, 0x10b01bb);

	/* MIMO response control */
	ar9170_regwrite(0x1c3694, 0x4003C1E);/* bit 26~28 otus-AM */

	/* switch MAC to OTUS interface */
	ar9170_regwrite(0x1c3600, 0x3);

	ar9170_regwrite(AR9170_MAC_REG_AMPDU_RX_THRESH, 0xffff);

	/* set PHY register read timeout (??) */
	ar9170_regwrite(AR9170_MAC_REG_MISC_680, 0xf00008);

	/* Disable Rx TimeOut, workaround for BB. */
	ar9170_regwrite(AR9170_MAC_REG_RX_TIMEOUT, 0x0);

	/* Set CPU clock frequency to 88/80MHz */
	ar9170_regwrite(AR9170_PWR_REG_CLOCK_SEL,
			AR9170_PWR_CLK_AHB_80_88MHZ |
			AR9170_PWR_CLK_DAC_160_INV_DLY);

	/* Set WLAN DMA interrupt mode: generate int per packet */
	ar9170_regwrite(AR9170_MAC_REG_TXRX_MPI, 0x110011);

	ar9170_regwrite(AR9170_MAC_REG_FCS_SELECT,
			AR9170_MAC_FCS_FIFO_PROT);

	/* Disables the CF_END frame, undocumented register */
	ar9170_regwrite(AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND,
			0x141E0F48);

	ar9170_regwrite_finish();

	return ar9170_regwrite_result();
}
153
154static int ar9170_set_mac_reg(struct ar9170 *ar, const u32 reg, const u8 *mac)
155{
156 static const u8 zero[ETH_ALEN] = { 0 };
157
158 if (!mac)
159 mac = zero;
160
161 ar9170_regwrite_begin(ar);
162
163 ar9170_regwrite(reg,
164 (mac[3] << 24) | (mac[2] << 16) |
165 (mac[1] << 8) | mac[0]);
166
167 ar9170_regwrite(reg + 4, (mac[5] << 8) | mac[4]);
168
169 ar9170_regwrite_finish();
170
171 return ar9170_regwrite_result();
172}
173
/*
 * Push the wanted 64-bit multicast hash (ar->want_mc_hash) into the
 * group-hash register pair; on success record it as the current hash.
 * Returns 0 or a negative error from the register write batch.
 */
int ar9170_update_multicast(struct ar9170 *ar)
{
	int err;

	ar9170_regwrite_begin(ar);
	ar9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_H,
			ar->want_mc_hash >> 32);
	ar9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_L,
			ar->want_mc_hash);

	ar9170_regwrite_finish();
	err = ar9170_regwrite_result();

	if (err)
		return err;

	ar->cur_mc_hash = ar->want_mc_hash;

	return 0;
}
194
195int ar9170_update_frame_filter(struct ar9170 *ar)
196{
197 int err;
198
199 err = ar9170_write_reg(ar, AR9170_MAC_REG_FRAMETYPE_FILTER,
200 ar->want_filter);
201
202 if (err)
203 return err;
204
205 ar->cur_filter = ar->want_filter;
206
207 return 0;
208}
209
210static int ar9170_set_promiscouous(struct ar9170 *ar)
211{
212 u32 encr_mode, sniffer;
213 int err;
214
215 err = ar9170_read_reg(ar, AR9170_MAC_REG_SNIFFER, &sniffer);
216 if (err)
217 return err;
218
219 err = ar9170_read_reg(ar, AR9170_MAC_REG_ENCRYPTION, &encr_mode);
220 if (err)
221 return err;
222
223 if (ar->sniffer_enabled) {
224 sniffer |= AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC;
225
226 /*
227 * Rx decryption works in place.
228 *
229 * If we don't disable it, the hardware will render all
230 * encrypted frames which are encrypted with an unknown
231 * key useless.
232 */
233
234 encr_mode |= AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE;
235 ar->sniffer_enabled = true;
236 } else {
237 sniffer &= ~AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC;
238
239 if (ar->rx_software_decryption)
240 encr_mode |= AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE;
241 else
242 encr_mode &= ~AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE;
243 }
244
245 ar9170_regwrite_begin(ar);
246 ar9170_regwrite(AR9170_MAC_REG_ENCRYPTION, encr_mode);
247 ar9170_regwrite(AR9170_MAC_REG_SNIFFER, sniffer);
248 ar9170_regwrite_finish();
249
250 return ar9170_regwrite_result();
251}
252
/*
 * Configure the MAC for the current interface type: pick the power
 * management mode bits, program MAC address and BSSID (cleared when no
 * interface is up), and refresh the sniffer/encryption settings.
 * Returns 0 or a negative error from the register accesses.
 */
int ar9170_set_operating_mode(struct ar9170 *ar)
{
	u32 pm_mode = AR9170_MAC_REG_POWERMGT_DEFAULTS;
	u8 *mac_addr, *bssid;
	int err;

	if (ar->vif) {
		mac_addr = ar->mac_addr;
		bssid = ar->bssid;

		switch (ar->vif->type) {
		case NL80211_IFTYPE_MESH_POINT:
		case NL80211_IFTYPE_ADHOC:
			pm_mode |= AR9170_MAC_REG_POWERMGT_IBSS;
			break;
/*		case NL80211_IFTYPE_AP:
			pm_mode |= AR9170_MAC_REG_POWERMGT_AP;
			break;*/
		case NL80211_IFTYPE_WDS:
			pm_mode |= AR9170_MAC_REG_POWERMGT_AP_WDS;
			break;
		case NL80211_IFTYPE_MONITOR:
			/* monitor mode: everything in software */
			ar->sniffer_enabled = true;
			ar->rx_software_decryption = true;
			break;
		default:
			/* station (and, while the AP case above is
			 * commented out, AP) mode */
			pm_mode |= AR9170_MAC_REG_POWERMGT_STA;
			break;
		}
	} else {
		/* no interface: clear MAC address and BSSID */
		mac_addr = NULL;
		bssid = NULL;
	}

	err = ar9170_set_mac_reg(ar, AR9170_MAC_REG_MAC_ADDR_L, mac_addr);
	if (err)
		return err;

	err = ar9170_set_mac_reg(ar, AR9170_MAC_REG_BSSID_L, bssid);
	if (err)
		return err;

	err = ar9170_set_promiscouous(ar);
	if (err)
		return err;

	ar9170_regwrite_begin(ar);

	ar9170_regwrite(AR9170_MAC_REG_POWERMANAGEMENT, pm_mode);
	ar9170_regwrite_finish();

	return ar9170_regwrite_result();
}
306
307int ar9170_set_hwretry_limit(struct ar9170 *ar, unsigned int max_retry)
308{
309 u32 tmp = min_t(u32, 0x33333, max_retry * 0x11111);
310
311 return ar9170_write_reg(ar, AR9170_MAC_REG_RETRY_MAX, tmp);
312}
313
/*
 * Program the beacon interval / DTIM period register and, in AP mode,
 * the pre-TBTT lead time. Bits 24/25 select the beaconing mode
 * (NOTE(review): presumably AP vs IBSS beacon generation — confirm
 * against vendor documentation).
 * Returns 0 or a negative error from the register write batch.
 */
int ar9170_set_beacon_timers(struct ar9170 *ar)
{
	u32 v = 0;
	u32 pretbtt = 0;

	v |= ar->hw->conf.beacon_int;

	if (ar->vif) {
		switch (ar->vif->type) {
		case NL80211_IFTYPE_MESH_POINT:
		case NL80211_IFTYPE_ADHOC:
			v |= BIT(25);
			break;
		case NL80211_IFTYPE_AP:
			v |= BIT(24);
			/* wake up 6 TU before the beacon is due */
			pretbtt = (ar->hw->conf.beacon_int - 6) << 16;
			break;
		default:
			break;
		}

		v |= ar->vif->bss_conf.dtim_period << 16;
	}

	ar9170_regwrite_begin(ar);

	ar9170_regwrite(AR9170_MAC_REG_PRETBTT, pretbtt);
	ar9170_regwrite(AR9170_MAC_REG_BCN_PERIOD, v);
	ar9170_regwrite_finish();
	return ar9170_regwrite_result();
}
345
346int ar9170_update_beacon(struct ar9170 *ar)
347{
348 struct sk_buff *skb;
349 __le32 *data, *old = NULL;
350 u32 word;
351 int i;
352
353 skb = ieee80211_beacon_get(ar->hw, ar->vif);
354 if (!skb)
355 return -ENOMEM;
356
357 data = (__le32 *)skb->data;
358 if (ar->beacon)
359 old = (__le32 *)ar->beacon->data;
360
361 ar9170_regwrite_begin(ar);
362 for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
363 /*
364 * XXX: This accesses beyond skb data for up
365 * to the last 3 bytes!!
366 */
367
368 if (old && (data[i] == old[i]))
369 continue;
370
371 word = le32_to_cpu(data[i]);
372 ar9170_regwrite(AR9170_BEACON_BUFFER_ADDRESS + 4 * i, word);
373 }
374
375 /* XXX: use skb->cb info */
376 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ)
377 ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP,
378 ((skb->len + 4) << (3+16)) + 0x0400);
379 else
380 ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP,
381 ((skb->len + 4) << (3+16)) + 0x0400);
382
383 ar9170_regwrite(AR9170_MAC_REG_BCN_LENGTH, skb->len + 4);
384 ar9170_regwrite(AR9170_MAC_REG_BCN_ADDR, AR9170_BEACON_BUFFER_ADDRESS);
385 ar9170_regwrite(AR9170_MAC_REG_BCN_CTRL, 1);
386
387 ar9170_regwrite_finish();
388
389 dev_kfree_skb(ar->beacon);
390 ar->beacon = skb;
391
392 return ar9170_regwrite_result();
393}
394
/*
 * Work handler run when the firmware signals a beacon-related event:
 * refresh the beacon in the hardware buffer and transmit any frames
 * mac80211 buffered for power-saving stations.
 */
void ar9170_new_beacon(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 beacon_work);
	struct sk_buff *skb;

	/* device gone or stopped: nothing to do */
	if (unlikely(!IS_STARTED(ar)))
		return ;

	mutex_lock(&ar->mutex);

	if (!ar->vif)
		goto out;

	ar9170_update_beacon(ar);

	/* flush buffered broadcast/multicast frames after the beacon */
	rcu_read_lock();
	while ((skb = ieee80211_get_buffered_bc(ar->hw, ar->vif)))
		ar9170_op_tx(ar->hw, skb);

	rcu_read_unlock();

 out:
	mutex_unlock(&ar->mutex);
}
420
/*
 * Install key material into firmware key slot 'id' via AR9170_CMD_EKEY.
 *
 * @mac:    peer address, or NULL for the broadcast/group address
 * @ktype:  cipher type (AR9170_ENC_ALG_*)
 * @keyidx: key index within the slot
 * @keydata/@keylen: key material, at most 16 bytes (vals[3..6] are
 *                   zeroed first, so shorter keys are zero-padded)
 *
 * Returns the firmware command result.
 */
int ar9170_upload_key(struct ar9170 *ar, u8 id, const u8 *mac, u8 ktype,
		      u8 keyidx, u8 *keydata, int keylen)
{
	__le32 vals[7];
	static const u8 bcast[ETH_ALEN] =
		{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	u8 dummy;

	mac = mac ? : bcast;

	vals[0] = cpu_to_le32((keyidx << 16) + id);
	vals[1] = cpu_to_le32(mac[1] << 24 | mac[0] << 16 | ktype);
	vals[2] = cpu_to_le32(mac[5] << 24 | mac[4] << 16 |
			      mac[3] << 8 | mac[2]);
	memset(&vals[3], 0, 16);
	if (keydata)
		memcpy(&vals[3], keydata, keylen);

	return ar->exec_cmd(ar, AR9170_CMD_EKEY,
			    sizeof(vals), (u8 *)vals,
			    1, &dummy);
}
443
444int ar9170_disable_key(struct ar9170 *ar, u8 id)
445{
446 __le32 val = cpu_to_le32(id);
447 u8 dummy;
448
449 return ar->exec_cmd(ar, AR9170_CMD_EKEY,
450 sizeof(val), (u8 *)&val,
451 1, &dummy);
452}
diff --git a/drivers/net/wireless/ar9170/main.c b/drivers/net/wireless/ar9170/main.c
new file mode 100644
index 000000000000..5996ff9f7f47
--- /dev/null
+++ b/drivers/net/wireless/ar9170/main.c
@@ -0,0 +1,1671 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, Christian Lamparter <chunkeey@web.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/init.h>
41#include <linux/module.h>
42#include <linux/etherdevice.h>
43#include <net/mac80211.h>
44#include "ar9170.h"
45#include "hw.h"
46#include "cmd.h"
47
48static int modparam_nohwcrypt;
49module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
50MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
51
/*
 * Legacy bitrate table shared by both bands.  hw_value packs the
 * hardware rate code in the low nibble and a tx-power lookup index in
 * bits 4-5 (see the "rate->hw_value & 0x30" use in ar9170_op_tx).
 */
#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate) | (_txpidx) << 4, \
}

static struct ieee80211_rate __ar9170_ratetable[] = {
	RATE(10, 0, 0, 0),
	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0xb, 0, 0),
	RATE(90, 0xf, 0, 0),
	RATE(120, 0xa, 0, 0),
	RATE(180, 0xe, 0, 0),
	RATE(240, 0x9, 0, 0),
	RATE(360, 0xd, 1, 0),
	RATE(480, 0x8, 2, 0),
	RATE(540, 0xc, 3, 0),
};
#undef RATE

/* 2.4 GHz uses the whole table; 5 GHz skips the four leading CCK rates */
#define ar9170_g_ratetable	(__ar9170_ratetable + 0)
#define ar9170_g_ratetable_size	12
#define ar9170_a_ratetable	(__ar9170_ratetable + 4)
#define ar9170_a_ratetable_size	8
78
/*
 * NB: The hw_value is used as an index into the ar9170_phy_freq_params
 * array in phy.c so that we don't have to do frequency lookups!
 */
#define CHAN(_freq, _idx) { \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 18, /* XXX */ \
}

static struct ieee80211_channel ar9170_2ghz_chantable[] = {
	CHAN(2412, 0),
	CHAN(2417, 1),
	CHAN(2422, 2),
	CHAN(2427, 3),
	CHAN(2432, 4),
	CHAN(2437, 5),
	CHAN(2442, 6),
	CHAN(2447, 7),
	CHAN(2452, 8),
	CHAN(2457, 9),
	CHAN(2462, 10),
	CHAN(2467, 11),
	CHAN(2472, 12),
	CHAN(2484, 13),
};

/*
 * Includes the 4.9 GHz public-safety channels (hw_value 14-20).  The
 * table is ordered by hw_value, not strictly by frequency: the last
 * four entries (5170-5230 MHz) come after the higher channels.
 */
static struct ieee80211_channel ar9170_5ghz_chantable[] = {
	CHAN(4920, 14),
	CHAN(4940, 15),
	CHAN(4960, 16),
	CHAN(4980, 17),
	CHAN(5040, 18),
	CHAN(5060, 19),
	CHAN(5080, 20),
	CHAN(5180, 21),
	CHAN(5200, 22),
	CHAN(5220, 23),
	CHAN(5240, 24),
	CHAN(5260, 25),
	CHAN(5280, 26),
	CHAN(5300, 27),
	CHAN(5320, 28),
	CHAN(5500, 29),
	CHAN(5520, 30),
	CHAN(5540, 31),
	CHAN(5560, 32),
	CHAN(5580, 33),
	CHAN(5600, 34),
	CHAN(5620, 35),
	CHAN(5640, 36),
	CHAN(5660, 37),
	CHAN(5680, 38),
	CHAN(5700, 39),
	CHAN(5745, 40),
	CHAN(5765, 41),
	CHAN(5785, 42),
	CHAN(5805, 43),
	CHAN(5825, 44),
	CHAN(5170, 45),
	CHAN(5190, 46),
	CHAN(5210, 47),
	CHAN(5230, 48),
};
#undef CHAN
144
/* 2.4 GHz band descriptor handed to mac80211 (hw->wiphy->bands) */
static struct ieee80211_supported_band ar9170_band_2GHz = {
	.channels = ar9170_2ghz_chantable,
	.n_channels = ARRAY_SIZE(ar9170_2ghz_chantable),
	.bitrates = ar9170_g_ratetable,
	.n_bitrates = ar9170_g_ratetable_size,
};
151
#ifdef AR9170_QUEUE_DEBUG
/*
 * In case someone wants to work with AR9170's crazy tx_status queueing
 * techniques, he might need these rather useful probing functions.
 *
 * NOTE: caller must hold the queue's spinlock!
 */

/* dump the driver tx header of a single queued frame */
static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ar9170_tx_control *txc = (void *) skb->data;
	struct ieee80211_hdr *hdr = (void *)txc->frame_data;

	printk(KERN_DEBUG "%s: => FRAME [skb:%p, queue:%d, DA:[%pM] "
			  "mac_control:%04x, phy_control:%08x]\n",
	       wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
	       ieee80211_get_DA(hdr), le16_to_cpu(txc->mac_control),
	       le32_to_cpu(txc->phy_control));
}

/* dump every frame sitting in a tx_status queue */
static void ar9170_dump_station_tx_status_queue(struct ar9170 *ar,
						struct sk_buff_head *queue)
{
	struct sk_buff *skb;
	int i = 0;

	printk(KERN_DEBUG "---[ cut here ]---\n");
	printk(KERN_DEBUG "%s: %d entries in tx_status queue.\n",
	       wiphy_name(ar->hw->wiphy), skb_queue_len(queue));

	skb_queue_walk(queue, skb) {
		/*
		 * Fix: the index was never incremented, so every entry
		 * used to be reported as "index:0".  Also dropped the
		 * unused txc/hdr locals - ar9170_print_txheader already
		 * decodes the header.
		 */
		printk(KERN_DEBUG "index:%d => \n", i++);
		ar9170_print_txheader(ar, skb);
	}
	printk(KERN_DEBUG "---[ end ]---\n");
}
#endif /* AR9170_QUEUE_DEBUG */
192
/* 5 GHz band descriptor handed to mac80211 (hw->wiphy->bands) */
static struct ieee80211_supported_band ar9170_band_5GHz = {
	.channels = ar9170_5ghz_chantable,
	.n_channels = ARRAY_SIZE(ar9170_5ghz_chantable),
	.bitrates = ar9170_a_ratetable,
	.n_bitrates = ar9170_a_ratetable_size,
};
199
/*
 * Complete a transmitted frame back to mac80211.
 *
 * Decrements the per-queue accounting (waking the queue if it was
 * stopped at its limit), translates the firmware status code into
 * mac80211 status flags, strips the driver-private tx header and
 * returns the skb via the irqsafe completion path.
 *
 * @valid_status: false when the report was synthesized (e.g. by the
 *	tx_status janitor), in which case the retry count is not filled in.
 */
void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
			     bool valid_status, u16 tx_status)
{
	struct ieee80211_tx_info *txinfo;
	unsigned int retries = 0, queue = skb_get_queue_mapping(skb);
	unsigned long flags;

	spin_lock_irqsave(&ar->tx_stats_lock, flags);
	ar->tx_stats[queue].len--;
	if (ieee80211_queue_stopped(ar->hw, queue))
		ieee80211_wake_queue(ar->hw, queue);
	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);

	txinfo = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(txinfo);

	switch (tx_status) {
	case AR9170_TX_STATUS_RETRY:
		retries = 2;
		/* fall through - a retried frame still got ACKed */
	case AR9170_TX_STATUS_COMPLETE:
		txinfo->flags |= IEEE80211_TX_STAT_ACK;
		break;

	case AR9170_TX_STATUS_FAILED:
		retries = ar->hw->conf.long_frame_max_tx_count;
		break;

	default:
		printk(KERN_ERR "%s: invalid tx_status response (%x).\n",
		       wiphy_name(ar->hw->wiphy), tx_status);
		break;
	}

	if (valid_status)
		txinfo->status.rates[0].count = retries + 1;

	/* remove the driver-private tx descriptor before completion */
	skb_pull(skb, sizeof(struct ar9170_tx_control));
	ieee80211_tx_status_irqsafe(ar->hw, skb);
}
239
/*
 * Scan @q for the first frame destined to @mac on hardware queue
 * @queue (as encoded in the QoS bits of the tx header's phy_control,
 * see ar9170_op_tx).  A match is unlinked from the queue and
 * returned; NULL if nothing matches.
 *
 * Takes the queue's own lock internally - the caller must NOT hold it.
 */
static struct sk_buff *ar9170_find_skb_in_queue(struct ar9170 *ar,
						const u8 *mac,
						const u32 queue,
						struct sk_buff_head *q)
{
	unsigned long flags;
	struct sk_buff *skb;

	spin_lock_irqsave(&q->lock, flags);
	skb_queue_walk(q, skb) {
		struct ar9170_tx_control *txc = (void *) skb->data;
		struct ieee80211_hdr *hdr = (void *) txc->frame_data;
		u32 txc_queue = (le32_to_cpu(txc->phy_control) &
				AR9170_TX_PHY_QOS_MASK) >>
				AR9170_TX_PHY_QOS_SHIFT;

		if ((queue != txc_queue) ||
		    (compare_ether_addr(ieee80211_get_DA(hdr), mac)))
			continue;

		/* unlink before dropping the lock so nobody else sees it */
		__skb_unlink(skb, q);
		spin_unlock_irqrestore(&q->lock, flags);
		return skb;
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return NULL;
}
267
/*
 * Map a firmware tx_status report (destination MAC + hw queue) back to
 * the frame it belongs to.  Checks, in order: the per-station status
 * queue, the "waste" queue of aged entries, and finally the global
 * status queue.  Returns the unlinked skb, or NULL if no candidate
 * was found.
 */
static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac,
					      const u32 queue)
{
	struct ieee80211_sta *sta;
	struct sk_buff *skb;

	/*
	 * Unfortunately, the firmware does not tell to which (queued) frame
	 * this transmission status report belongs to.
	 *
	 * So we have to make risky guesses - with the scarce information
	 * the firmware provided (-> destination MAC, and phy_control) -
	 * and hope that we picked the right one...
	 */
	rcu_read_lock();
	sta = ieee80211_find_sta(ar->hw, mac);

	if (likely(sta)) {
		struct ar9170_sta_info *sta_priv = (void *) sta->drv_priv;
		skb = skb_dequeue(&sta_priv->tx_status[queue]);
		rcu_read_unlock();
		if (likely(skb))
			return skb;
	} else
		rcu_read_unlock();

	/* scan the waste queue for candidates */
	skb = ar9170_find_skb_in_queue(ar, mac, queue,
				       &ar->global_tx_status_waste);
	if (!skb) {
		/* so it still _must_ be in the global list. */
		skb = ar9170_find_skb_in_queue(ar, mac, queue,
					       &ar->global_tx_status);
	}

#ifdef AR9170_QUEUE_DEBUG
	if (unlikely((!skb) && net_ratelimit())) {
		printk(KERN_ERR "%s: ESS:[%pM] does not have any "
				"outstanding frames in this queue (%d).\n",
				wiphy_name(ar->hw->wiphy), mac, queue);
	}
#endif /* AR9170_QUEUE_DEBUG */
	return skb;
}
312
/*
 * This worker tries to keep the global tx_status queue empty.
 * So we can guarantee that incoming tx_status reports for
 * unregistered stations are always synced with the actual
 * frame - which we think - belongs to.
 *
 * Each pass ages entries one step: frames in the waste queue are
 * completed to mac80211 as failed, and frames still in the global
 * queue are moved to the waste queue.  Reschedules itself every
 * 100 ms while the waste queue is non-empty.
 */

static void ar9170_tx_status_janitor(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 tx_status_janitor.work);
	struct sk_buff *skb;

	if (unlikely(!IS_STARTED(ar)))
		return ;

	mutex_lock(&ar->mutex);
	/* recycle the garbage back to mac80211... one by one. */
	while ((skb = skb_dequeue(&ar->global_tx_status_waste))) {
#ifdef AR9170_QUEUE_DEBUG
		printk(KERN_DEBUG "%s: dispose queued frame =>\n",
		       wiphy_name(ar->hw->wiphy));
		ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
		ar9170_handle_tx_status(ar, skb, false,
					AR9170_TX_STATUS_FAILED);
	}

	while ((skb = skb_dequeue(&ar->global_tx_status))) {
#ifdef AR9170_QUEUE_DEBUG
		printk(KERN_DEBUG "%s: moving frame into waste queue =>\n",
		       wiphy_name(ar->hw->wiphy));

		ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
		skb_queue_tail(&ar->global_tx_status_waste, skb);
	}

	/* recall the janitor in 100ms - if there's garbage in the can. */
	if (skb_queue_len(&ar->global_tx_status_waste) > 0)
		queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
				   msecs_to_jiffies(100));

	mutex_unlock(&ar->mutex);
}
358
/*
 * Dispatch one decoded response buffer from the device.
 *
 * Buffers whose type does not carry the 0xc0 bits answer an
 * outstanding command and are routed to the command-completion
 * callback; the 0xc0..0xcf codes are unsolicited hardware events and
 * are handled (or deliberately ignored) here.
 */
static void ar9170_handle_command_response(struct ar9170 *ar,
					   void *buf, u32 len)
{
	struct ar9170_cmd_response *cmd = (void *) buf;

	if ((cmd->type & 0xc0) != 0xc0) {
		ar->callback_cmd(ar, len, buf);
		return;
	}

	/* hardware event handlers */
	switch (cmd->type) {
	case 0xc1: {
		/*
		 * TX status notification:
		 * bytes: 0c c1 XX YY M1 M2 M3 M4 M5 M6 R4 R3 R2 R1 S2 S1
		 *
		 * XX always 81
		 * YY always 00
		 * M1-M6 is the MAC address
		 * R1-R4 is the transmit rate
		 * S1-S2 is the transmit status
		 */

		struct sk_buff *skb;
		u32 queue = (le32_to_cpu(cmd->tx_status.rate) &
			    AR9170_TX_PHY_QOS_MASK) >> AR9170_TX_PHY_QOS_SHIFT;

		skb = ar9170_find_queued_skb(ar, cmd->tx_status.dst, queue);
		if (unlikely(!skb))
			return ;

		ar9170_handle_tx_status(ar, skb, true,
					le16_to_cpu(cmd->tx_status.status));
		break;
	}

	case 0xc0:
		/*
		 * pre-TBTT event - time to prepare the next beacon (AP only)
		 */
		if (ar->vif && ar->vif->type == NL80211_IFTYPE_AP)
			queue_work(ar->hw->workqueue, &ar->beacon_work);
		break;

	case 0xc2:
		/*
		 * (IBSS) beacon send notification
		 * bytes: 04 c2 XX YY B4 B3 B2 B1
		 *
		 * XX always 80
		 * YY always 00
		 * B1-B4 "should" be the number of send out beacons.
		 */
		break;

	case 0xc3:
		/* End of Atim Window */
		break;

	case 0xc4:
	case 0xc5:
		/* BlockACK events */
		break;

	case 0xc6:
		/* Watchdog Interrupt */
		break;

	case 0xc9:
		/* retransmission issue / SIFS/EIFS collision ?! */
		break;

	default:
		printk(KERN_INFO "received unhandled event %x\n", cmd->type);
		print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
		break;
	}
}
438
/*
 * If the frame alignment is right (or the kernel has
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
 * is only a single MPDU in the USB frame, then we can
 * submit to mac80211 the SKB directly. However, since
 * there may be multiple packets in one SKB in stream
 * mode, and we need to observe the proper ordering,
 * this is non-trivial.
 */
static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
{
	struct sk_buff *skb;
	struct ar9170_rx_head *head = (void *)buf;
	struct ar9170_rx_tail *tail;
	struct ieee80211_rx_status status;
	int mpdu_len, i;
	u8 error, antennas = 0, decrypt;
	__le16 fc;
	int reserved;

	if (unlikely(!IS_STARTED(ar)))
		return ;

	/* Received MPDU: payload sits between the rx head and tail */
	mpdu_len = len;
	mpdu_len -= sizeof(struct ar9170_rx_head);
	mpdu_len -= sizeof(struct ar9170_rx_tail);
	BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12);
	BUILD_BUG_ON(sizeof(struct ar9170_rx_tail) != 24);

	if (mpdu_len <= FCS_LEN)
		return;

	tail = (void *)(buf + sizeof(struct ar9170_rx_head) + mpdu_len);

	/* a raw RSSI of 0x80 apparently marks an unused antenna chain
	 * - TODO confirm against the vendor documentation */
	for (i = 0; i < 3; i++)
		if (tail->rssi[i] != 0x80)
			antennas |= BIT(i);

	/* post-process RSSI */
	for (i = 0; i < 7; i++)
		if (tail->rssi[i] & 0x80)
			tail->rssi[i] = ((tail->rssi[i] & 0x7f) + 1) & 0x7f;

	memset(&status, 0, sizeof(status));

	status.band = ar->channel->band;
	status.freq = ar->channel->center_freq;
	status.signal = ar->noise[0] + tail->rssi_combined;
	status.noise = ar->noise[0];
	status.antenna = antennas;

	switch (tail->status & AR9170_RX_STATUS_MODULATION_MASK) {
	case AR9170_RX_STATUS_MODULATION_CCK:
		if (tail->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
			status.flag |= RX_FLAG_SHORTPRE;
		/* CCK PLCP encodes the rate in units of 100 kbit/s
		 * (0x0a=1M, 0x14=2M, 0x37=5.5M, 0x6e=11M) */
		switch (head->plcp[0]) {
		case 0x0a:
			status.rate_idx = 0;
			break;
		case 0x14:
			status.rate_idx = 1;
			break;
		case 0x37:
			status.rate_idx = 2;
			break;
		case 0x6e:
			status.rate_idx = 3;
			break;
		default:
			if ((!ar->sniffer_enabled) && (net_ratelimit()))
				printk(KERN_ERR "%s: invalid plcp cck rate "
				       "(%x).\n", wiphy_name(ar->hw->wiphy),
				       head->plcp[0]);
			return;
		}
		break;
	case AR9170_RX_STATUS_MODULATION_OFDM:
		/* OFDM rate code is in the low nibble of the PLCP */
		switch (head->plcp[0] & 0xF) {
		case 0xB:
			status.rate_idx = 0;
			break;
		case 0xF:
			status.rate_idx = 1;
			break;
		case 0xA:
			status.rate_idx = 2;
			break;
		case 0xE:
			status.rate_idx = 3;
			break;
		case 0x9:
			status.rate_idx = 4;
			break;
		case 0xD:
			status.rate_idx = 5;
			break;
		case 0x8:
			status.rate_idx = 6;
			break;
		case 0xC:
			status.rate_idx = 7;
			break;
		default:
			if ((!ar->sniffer_enabled) && (net_ratelimit()))
				printk(KERN_ERR "%s: invalid plcp ofdm rate "
				       "(%x).\n", wiphy_name(ar->hw->wiphy),
				       head->plcp[0]);
			return;
		}
		/* on 2.4 GHz the OFDM rates sit after the 4 CCK entries */
		if (status.band == IEEE80211_BAND_2GHZ)
			status.rate_idx += 4;
		break;
	case AR9170_RX_STATUS_MODULATION_HT:
	case AR9170_RX_STATUS_MODULATION_DUPOFDM:
		/* XXX */

		if (net_ratelimit())
			printk(KERN_ERR "%s: invalid modulation\n",
			       wiphy_name(ar->hw->wiphy));
		return;
	}

	error = tail->error;

	if (error & AR9170_RX_ERROR_MMIC) {
		status.flag |= RX_FLAG_MMIC_ERROR;
		error &= ~AR9170_RX_ERROR_MMIC;
	}

	if (error & AR9170_RX_ERROR_PLCP) {
		status.flag |= RX_FLAG_FAILED_PLCP_CRC;
		error &= ~AR9170_RX_ERROR_PLCP;
	}

	if (error & AR9170_RX_ERROR_FCS) {
		status.flag |= RX_FLAG_FAILED_FCS_CRC;
		error &= ~AR9170_RX_ERROR_FCS;
	}

	decrypt = ar9170_get_decrypt_type(tail);
	if (!(decrypt & AR9170_RX_ENC_SOFTWARE) &&
	    decrypt != AR9170_ENC_ALG_NONE)
		status.flag |= RX_FLAG_DECRYPTED;

	/* ignore wrong RA errors */
	error &= ~AR9170_RX_ERROR_WRONG_RA;

	if (error & AR9170_RX_ERROR_DECRYPT) {
		error &= ~AR9170_RX_ERROR_DECRYPT;

		/*
		 * Rx decryption is done in place,
		 * the original data is lost anyway.
		 */
		return ;
	}

	/* drop any other error frames */
	if ((error) && (net_ratelimit())) {
		printk(KERN_DEBUG "%s: errors: %#x\n",
		       wiphy_name(ar->hw->wiphy), error);
		return;
	}

	buf += sizeof(struct ar9170_rx_head);
	fc = *(__le16 *)buf;

	/* extra headroom for mac80211: 32 bytes, +2 to realign the
	 * payload when a QoS-control field is present without A4 */
	if (ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc))
		reserved = 32 + 2;
	else
		reserved = 32;

	skb = dev_alloc_skb(mpdu_len + reserved);
	if (!skb)
		return;

	skb_reserve(skb, reserved);
	memcpy(skb_put(skb, mpdu_len), buf, mpdu_len);
	ieee80211_rx_irqsafe(ar->hw, skb, &status);
}
620
/*
 * Parse one USB transfer buffer.
 *
 * The device streams multiple chunks per transfer; each chunk starts
 * with a 4-byte tag: 16-bit little-endian length, then the 0x4e00
 * stream marker.  Payloads are 4-byte padded.  A payload prefixed by
 * six 0xffff words is a command response/event, everything else is a
 * received MPDU.
 */
void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
{
	unsigned int i, tlen, resplen;
	u8 *tbuf, *respbuf;

	tbuf = skb->data;
	tlen = skb->len;

	while (tlen >= 4) {
		int clen = tbuf[1] << 8 | tbuf[0];
		int wlen = (clen + 3) & ~3;	/* pad to 4-byte boundary */

		/*
		 * parse stream (if any)
		 */
		if (tbuf[2] != 0 || tbuf[3] != 0x4e) {
			printk(KERN_ERR "%s: missing tag!\n",
			       wiphy_name(ar->hw->wiphy));
			return ;
		}
		if (wlen > tlen - 4) {
			printk(KERN_ERR "%s: invalid RX (%d, %d, %d)\n",
			       wiphy_name(ar->hw->wiphy), clen, wlen, tlen);
			print_hex_dump(KERN_DEBUG, "data: ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, tbuf, tlen, true);
			return ;
		}
		resplen = clen;
		respbuf = tbuf + 4;
		tbuf += wlen + 4;
		tlen -= wlen + 4;

		i = 0;

		/* weird thing, but this is the same in the original driver */
		while (resplen > 2 && i < 12 &&
		       respbuf[0] == 0xff && respbuf[1] == 0xff) {
			i += 2;
			resplen -= 2;
			respbuf += 2;
		}

		if (resplen < 4)
			continue;

		/* found the 6 * 0xffff marker? */
		if (i == 12)
			ar9170_handle_command_response(ar, respbuf, resplen);
		else
			ar9170_handle_mpdu(ar, respbuf, resplen);
	}

	if (tlen)
		printk(KERN_ERR "%s: buffer remains!\n",
		       wiphy_name(ar->hw->wiphy));
}
678
/* initialize one EDCA parameter record (aifs/cw_min/cw_max/txop) */
#define AR9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)		\
do {									\
	queue.aifs = ai_fs;						\
	queue.cw_min = cwmin;						\
	queue.cw_max = cwmax;						\
	queue.txop = _txop;						\
} while (0)
686
/*
 * mac80211 start callback: bring the device up.
 *
 * Resets queue statistics and QoS defaults, opens the transport
 * (ar->open), initializes MAC/PHY/RF and finally starts DMA.  On
 * success the state advances to AR9170_STARTED.
 */
static int ar9170_op_start(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;
	int err, i;

	mutex_lock(&ar->mutex);

	/* reinitialize queues statistics */
	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
	for (i = 0; i < ARRAY_SIZE(ar->tx_stats); i++)
		ar->tx_stats[i].limit = 8;

	/* reset QoS defaults */
	AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023,  0); /* BEST EFFORT*/
	AR9170_FILL_QUEUE(ar->edcf[1], 7, 15, 1023,  0); /* BACKGROUND */
	AR9170_FILL_QUEUE(ar->edcf[2], 2, 7,    15, 94); /* VIDEO */
	AR9170_FILL_QUEUE(ar->edcf[3], 2, 3,     7, 47); /* VOICE */
	AR9170_FILL_QUEUE(ar->edcf[4], 2, 3,     7,  0); /* SPECIAL */

	err = ar->open(ar);
	if (err)
		goto out;

	err = ar9170_init_mac(ar);
	if (err)
		goto out;

	err = ar9170_set_qos(ar);
	if (err)
		goto out;

	err = ar9170_init_phy(ar, IEEE80211_BAND_2GHZ);
	if (err)
		goto out;

	err = ar9170_init_rf(ar);
	if (err)
		goto out;

	/* start DMA */
	err = ar9170_write_reg(ar, 0x1c3d30, 0x100);
	if (err)
		goto out;

	ar->state = AR9170_STARTED;

out:
	mutex_unlock(&ar->mutex);
	return err;
}
737
/*
 * mac80211 stop callback: cancel the workers, drain the status
 * queues and shut the device down.
 */
static void ar9170_op_stop(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;

	/*
	 * NOTE(review): the state is dropped to IDLE before ar->mutex is
	 * taken, presumably so the workers cancelled below bail out via
	 * their IS_STARTED() checks - confirm nothing races on the
	 * unlocked state transition.
	 */
	if (IS_STARTED(ar))
		ar->state = AR9170_IDLE;

	mutex_lock(&ar->mutex);

	cancel_delayed_work_sync(&ar->tx_status_janitor);
	cancel_work_sync(&ar->filter_config_work);
	cancel_work_sync(&ar->beacon_work);
	skb_queue_purge(&ar->global_tx_status_waste);
	skb_queue_purge(&ar->global_tx_status);

	if (IS_ACCEPTING_CMD(ar)) {
		ar9170_set_leds_state(ar, 0);

		/* stop DMA */
		ar9170_write_reg(ar, 0x1c3d30, 0);
		ar->stop(ar);
	}

	mutex_unlock(&ar->mutex);
}
763
764int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
765{
766 struct ar9170 *ar = hw->priv;
767 struct ieee80211_hdr *hdr;
768 struct ar9170_tx_control *txc;
769 struct ieee80211_tx_info *info;
770 struct ieee80211_rate *rate = NULL;
771 struct ieee80211_tx_rate *txrate;
772 unsigned int queue = skb_get_queue_mapping(skb);
773 unsigned long flags = 0;
774 struct ar9170_sta_info *sta_info = NULL;
775 u32 power, chains;
776 u16 keytype = 0;
777 u16 len, icv = 0;
778 int err;
779 bool tx_status;
780
781 if (unlikely(!IS_STARTED(ar)))
782 goto err_free;
783
784 hdr = (void *)skb->data;
785 info = IEEE80211_SKB_CB(skb);
786 len = skb->len;
787
788 spin_lock_irqsave(&ar->tx_stats_lock, flags);
789 if (ar->tx_stats[queue].limit < ar->tx_stats[queue].len) {
790 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
791 return NETDEV_TX_OK;
792 }
793
794 ar->tx_stats[queue].len++;
795 ar->tx_stats[queue].count++;
796 if (ar->tx_stats[queue].limit == ar->tx_stats[queue].len)
797 ieee80211_stop_queue(hw, queue);
798
799 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
800
801 txc = (void *)skb_push(skb, sizeof(*txc));
802
803 tx_status = (((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) != 0) ||
804 ((info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) != 0));
805
806 if (info->control.hw_key) {
807 icv = info->control.hw_key->icv_len;
808
809 switch (info->control.hw_key->alg) {
810 case ALG_WEP:
811 keytype = AR9170_TX_MAC_ENCR_RC4;
812 break;
813 case ALG_TKIP:
814 keytype = AR9170_TX_MAC_ENCR_RC4;
815 break;
816 case ALG_CCMP:
817 keytype = AR9170_TX_MAC_ENCR_AES;
818 break;
819 default:
820 WARN_ON(1);
821 goto err_dequeue;
822 }
823 }
824
825 /* Length */
826 txc->length = cpu_to_le16(len + icv + 4);
827
828 txc->mac_control = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
829 AR9170_TX_MAC_BACKOFF);
830 txc->mac_control |= cpu_to_le16(ar9170_qos_hwmap[queue] <<
831 AR9170_TX_MAC_QOS_SHIFT);
832 txc->mac_control |= cpu_to_le16(keytype);
833 txc->phy_control = cpu_to_le32(0);
834
835 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
836 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
837
838 if (info->flags & IEEE80211_TX_CTL_AMPDU)
839 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
840
841 txrate = &info->control.rates[0];
842
843 if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
844 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
845 else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
846 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
847
848 if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
849 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
850
851 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
852 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
853
854 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
855 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ);
856 /* this works because 40 MHz is 2 and dup is 3 */
857 if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
858 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP);
859
860 if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
861 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);
862
863 if (txrate->flags & IEEE80211_TX_RC_MCS) {
864 u32 r = txrate->idx;
865 u8 *txpower;
866
867 r <<= AR9170_TX_PHY_MCS_SHIFT;
868 if (WARN_ON(r & ~AR9170_TX_PHY_MCS_MASK))
869 goto err_dequeue;
870 txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK);
871 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
872
873 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
874 if (info->band == IEEE80211_BAND_5GHZ)
875 txpower = ar->power_5G_ht40;
876 else
877 txpower = ar->power_2G_ht40;
878 } else {
879 if (info->band == IEEE80211_BAND_5GHZ)
880 txpower = ar->power_5G_ht20;
881 else
882 txpower = ar->power_2G_ht20;
883 }
884
885 power = txpower[(txrate->idx) & 7];
886 } else {
887 u8 *txpower;
888 u32 mod;
889 u32 phyrate;
890 u8 idx = txrate->idx;
891
892 if (info->band != IEEE80211_BAND_2GHZ) {
893 idx += 4;
894 txpower = ar->power_5G_leg;
895 mod = AR9170_TX_PHY_MOD_OFDM;
896 } else {
897 if (idx < 4) {
898 txpower = ar->power_2G_cck;
899 mod = AR9170_TX_PHY_MOD_CCK;
900 } else {
901 mod = AR9170_TX_PHY_MOD_OFDM;
902 txpower = ar->power_2G_ofdm;
903 }
904 }
905
906 rate = &__ar9170_ratetable[idx];
907
908 phyrate = rate->hw_value & 0xF;
909 power = txpower[(rate->hw_value & 0x30) >> 4];
910 phyrate <<= AR9170_TX_PHY_MCS_SHIFT;
911
912 txc->phy_control |= cpu_to_le32(mod);
913 txc->phy_control |= cpu_to_le32(phyrate);
914 }
915
916 power <<= AR9170_TX_PHY_TX_PWR_SHIFT;
917 power &= AR9170_TX_PHY_TX_PWR_MASK;
918 txc->phy_control |= cpu_to_le32(power);
919
920 /* set TX chains */
921 if (ar->eeprom.tx_mask == 1) {
922 chains = AR9170_TX_PHY_TXCHAIN_1;
923 } else {
924 chains = AR9170_TX_PHY_TXCHAIN_2;
925
926 /* >= 36M legacy OFDM - use only one chain */
927 if (rate && rate->bitrate >= 360)
928 chains = AR9170_TX_PHY_TXCHAIN_1;
929 }
930 txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
931
932 if (tx_status) {
933 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
934 /*
935 * WARNING:
936 * Putting the QoS queue bits into an unexplored territory is
937 * certainly not elegant.
938 *
939 * In my defense: This idea provides a reasonable way to
940 * smuggle valuable information to the tx_status callback.
941 * Also, the idea behind this bit-abuse came straight from
942 * the original driver code.
943 */
944
945 txc->phy_control |=
946 cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
947
948 if (info->control.sta) {
949 sta_info = (void *) info->control.sta->drv_priv;
950 skb_queue_tail(&sta_info->tx_status[queue], skb);
951 } else {
952 skb_queue_tail(&ar->global_tx_status, skb);
953
954 queue_delayed_work(ar->hw->workqueue,
955 &ar->tx_status_janitor,
956 msecs_to_jiffies(100));
957 }
958 }
959
960 err = ar->tx(ar, skb, tx_status, 0);
961 if (unlikely(tx_status && err)) {
962 if (info->control.sta)
963 skb_unlink(skb, &sta_info->tx_status[queue]);
964 else
965 skb_unlink(skb, &ar->global_tx_status);
966 }
967
968 return NETDEV_TX_OK;
969
970err_dequeue:
971 spin_lock_irqsave(&ar->tx_stats_lock, flags);
972 ar->tx_stats[queue].len--;
973 ar->tx_stats[queue].count--;
974 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
975
976err_free:
977 dev_kfree_skb(skb);
978 return NETDEV_TX_OK;
979}
980
/*
 * mac80211 add_interface callback.  Only a single vif is supported;
 * a second one is refused with -EBUSY.  Non-station interfaces (and
 * the nohwcrypt module option) force software crypto.
 */
static int ar9170_op_add_interface(struct ieee80211_hw *hw,
				   struct ieee80211_if_init_conf *conf)
{
	struct ar9170 *ar = hw->priv;
	int err = 0;

	mutex_lock(&ar->mutex);

	if (ar->vif) {
		err = -EBUSY;
		goto unlock;
	}

	ar->vif = conf->vif;
	memcpy(ar->mac_addr, conf->mac_addr, ETH_ALEN);

	if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
		ar->rx_software_decryption = true;
		ar->disable_offload = true;
	}

	ar->cur_filter = 0;
	ar->want_filter = AR9170_MAC_REG_FTF_DEFAULTS;
	err = ar9170_update_frame_filter(ar);
	if (err)
		goto unlock;

	err = ar9170_set_operating_mode(ar);

unlock:
	mutex_unlock(&ar->mutex);
	return err;
}
1014
/*
 * mac80211 remove_interface callback: drop the vif, clear filters and
 * beacon state, and restore the default operating mode.
 */
static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_if_init_conf *conf)
{
	struct ar9170 *ar = hw->priv;

	mutex_lock(&ar->mutex);
	ar->vif = NULL;
	ar->want_filter = 0;
	ar9170_update_frame_filter(ar);
	ar9170_set_beacon_timers(ar);
	dev_kfree_skb(ar->beacon);
	ar->beacon = NULL;
	ar->sniffer_enabled = false;
	ar->rx_software_decryption = false;
	ar9170_set_operating_mode(ar);
	mutex_unlock(&ar->mutex);
}
1032
1033static int ar9170_op_config(struct ieee80211_hw *hw, u32 changed)
1034{
1035 struct ar9170 *ar = hw->priv;
1036 int err = 0;
1037
1038 mutex_lock(&ar->mutex);
1039
1040 if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) {
1041 /* TODO */
1042 err = 0;
1043 }
1044
1045 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
1046 /* TODO */
1047 err = 0;
1048 }
1049
1050 if (changed & IEEE80211_CONF_CHANGE_PS) {
1051 /* TODO */
1052 err = 0;
1053 }
1054
1055 if (changed & IEEE80211_CONF_CHANGE_POWER) {
1056 /* TODO */
1057 err = 0;
1058 }
1059
1060 if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
1061 /*
1062 * is it long_frame_max_tx_count or short_frame_max_tx_count?
1063 */
1064
1065 err = ar9170_set_hwretry_limit(ar,
1066 ar->hw->conf.long_frame_max_tx_count);
1067 if (err)
1068 goto out;
1069 }
1070
1071 if (changed & IEEE80211_CONF_CHANGE_BEACON_INTERVAL) {
1072 err = ar9170_set_beacon_timers(ar);
1073 if (err)
1074 goto out;
1075 }
1076
1077 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
1078 err = ar9170_set_channel(ar, hw->conf.channel,
1079 AR9170_RFI_NONE, AR9170_BW_20);
1080 if (err)
1081 goto out;
1082 /* adjust slot time for 5 GHz */
1083 if (hw->conf.channel->band == IEEE80211_BAND_5GHZ)
1084 err = ar9170_write_reg(ar, AR9170_MAC_REG_SLOT_TIME,
1085 9 << 10);
1086 }
1087
1088out:
1089 mutex_unlock(&ar->mutex);
1090 return err;
1091}
1092
1093static int ar9170_op_config_interface(struct ieee80211_hw *hw,
1094 struct ieee80211_vif *vif,
1095 struct ieee80211_if_conf *conf)
1096{
1097 struct ar9170 *ar = hw->priv;
1098 int err = 0;
1099
1100 mutex_lock(&ar->mutex);
1101
1102 if (conf->changed & IEEE80211_IFCC_BSSID) {
1103 memcpy(ar->bssid, conf->bssid, ETH_ALEN);
1104 err = ar9170_set_operating_mode(ar);
1105 }
1106
1107 if (conf->changed & IEEE80211_IFCC_BEACON) {
1108 err = ar9170_update_beacon(ar);
1109
1110 if (err)
1111 goto out;
1112 err = ar9170_set_beacon_timers(ar);
1113 }
1114
1115out:
1116 mutex_unlock(&ar->mutex);
1117 return err;
1118}
1119
/*
 * Worker: push the filter changes recorded by
 * ar9170_op_configure_filter (promiscuous mode, multicast hash,
 * frame filter) out to the device.  Runs under ar->mutex because the
 * register writes may sleep.
 */
static void ar9170_set_filters(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 filter_config_work);
	int err;

	mutex_lock(&ar->mutex);
	if (unlikely(!IS_STARTED(ar)))
		goto unlock;

	if (ar->filter_changed & AR9170_FILTER_CHANGED_PROMISC) {
		err = ar9170_set_operating_mode(ar);
		if (err)
			goto unlock;
	}

	if (ar->filter_changed & AR9170_FILTER_CHANGED_MULTICAST) {
		err = ar9170_update_multicast(ar);
		if (err)
			goto unlock;
	}

	if (ar->filter_changed & AR9170_FILTER_CHANGED_FRAMEFILTER)
		err = ar9170_update_frame_filter(ar);

unlock:
	mutex_unlock(&ar->mutex);
}
1148
/*
 * mac80211 configure_filter callback.  Runs in atomic context, so it
 * only records the wanted settings (multicast hash, frame filter,
 * sniffer mode) and defers the actual device writes to the
 * filter_config_work worker.
 */
static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
				       unsigned int changed_flags,
				       unsigned int *new_flags,
				       int mc_count, struct dev_mc_list *mclist)
{
	struct ar9170 *ar = hw->priv;

	/* mask supported flags */
	*new_flags &= FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC |
		      FIF_PROMISC_IN_BSS;

	/*
	 * We can support more by setting the sniffer bit and
	 * then checking the error flags, later.
	 */

	if (changed_flags & FIF_ALLMULTI) {
		if (*new_flags & FIF_ALLMULTI) {
			ar->want_mc_hash = ~0ULL;
		} else {
			u64 mchash;
			int i;

			/* always get broadcast frames */
			mchash = 1ULL << (0xff>>2);

			/* hash on the top 6 bits of the last address octet */
			for (i = 0; i < mc_count; i++) {
				if (WARN_ON(!mclist))
					break;
				mchash |= 1ULL << (mclist->dmi_addr[5] >> 2);
				mclist = mclist->next;
			}
			ar->want_mc_hash = mchash;
		}
		ar->filter_changed |= AR9170_FILTER_CHANGED_MULTICAST;
	}

	if (changed_flags & FIF_CONTROL) {
		u32 filter = AR9170_MAC_REG_FTF_PSPOLL |
			     AR9170_MAC_REG_FTF_RTS |
			     AR9170_MAC_REG_FTF_CTS |
			     AR9170_MAC_REG_FTF_ACK |
			     AR9170_MAC_REG_FTF_CFE |
			     AR9170_MAC_REG_FTF_CFE_ACK;

		if (*new_flags & FIF_CONTROL)
			ar->want_filter = ar->cur_filter | filter;
		else
			ar->want_filter = ar->cur_filter & ~filter;

		ar->filter_changed |= AR9170_FILTER_CHANGED_FRAMEFILTER;
	}

	if (changed_flags & FIF_PROMISC_IN_BSS) {
		ar->sniffer_enabled = ((*new_flags) & FIF_PROMISC_IN_BSS) != 0;
		ar->filter_changed |= AR9170_FILTER_CHANGED_PROMISC;
	}

	if (likely(IS_STARTED(ar)))
		queue_work(ar->hw->workqueue, &ar->filter_config_work);
}
1210
/*
 * mac80211 bss_info_changed callback: push association state, slot
 * time and basic-rate changes to the MAC registers.  All register
 * writes are batched between ar9170_regwrite_begin/finish under
 * ar->mutex.
 */
static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct ar9170 *ar = hw->priv;
	int err = 0;

	mutex_lock(&ar->mutex);

	ar9170_regwrite_begin(ar);

	if (changed & BSS_CHANGED_ASSOC) {
		/* only transition into ASSOCIATED; disassoc keeps prior state */
		ar->state = bss_conf->assoc ? AR9170_ASSOCIATED : ar->state;

#ifndef CONFIG_AR9170_LEDS
		/* enable assoc LED. */
		err = ar9170_set_leds_state(ar, bss_conf->assoc ? 2 : 0);
#endif /* CONFIG_AR9170_LEDS */
	}

	if (changed & BSS_CHANGED_HT) {
		/* TODO */
		err = 0;
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		/* 20 us long slot, 9 us short slot */
		u32 slottime = 20;

		if (bss_conf->use_short_slot)
			slottime = 9;

		/* NOTE(review): << 10 positions the value in the register's
		 * slot-time field — confirm field offset against datasheet */
		ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, slottime << 10);
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		u32 cck, ofdm;

		if (hw->conf.channel->band == IEEE80211_BAND_5GHZ) {
			/* 5 GHz has no CCK rates */
			ofdm = bss_conf->basic_rates;
			cck = 0;
		} else {
			/* four cck rates */
			cck = bss_conf->basic_rates & 0xf;
			ofdm = bss_conf->basic_rates >> 4;
		}
		ar9170_regwrite(AR9170_MAC_REG_BASIC_RATE,
				ofdm << 8 | cck);
	}

	ar9170_regwrite_finish();
	err = ar9170_regwrite_result();
	mutex_unlock(&ar->mutex);
}
1265
1266static u64 ar9170_op_get_tsf(struct ieee80211_hw *hw)
1267{
1268 struct ar9170 *ar = hw->priv;
1269 int err;
1270 u32 tsf_low;
1271 u32 tsf_high;
1272 u64 tsf;
1273
1274 mutex_lock(&ar->mutex);
1275 err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_L, &tsf_low);
1276 if (!err)
1277 err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_H, &tsf_high);
1278 mutex_unlock(&ar->mutex);
1279
1280 if (WARN_ON(err))
1281 return 0;
1282
1283 tsf = tsf_high;
1284 tsf = (tsf << 32) | tsf_low;
1285 return tsf;
1286}
1287
/*
 * mac80211 set_key callback: program (SET_KEY) or clear (DISABLE_KEY)
 * a hardware crypto key.
 *
 * Hardware key slot layout:
 *   0..63 - pairwise keys, allocated first-free; tracked in the
 *           ar->usedkeys bitmap
 *   64+   - group keys, addressed as 64 + key->keyidx
 *
 * Returns -EOPNOTSUPP when no vif exists or offload is disabled, so
 * mac80211 falls back to software crypto; -ENOSPC when all pairwise
 * slots are taken (the driver then also switches the device to
 * software RX decryption).
 */
static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ieee80211_key_conf *key)
{
	struct ar9170 *ar = hw->priv;
	int err = 0, i;
	u8 ktype;

	if ((!ar->vif) || (ar->disable_offload))
		return -EOPNOTSUPP;

	/* map mac80211 cipher to the hardware's encryption-algorithm id */
	switch (key->alg) {
	case ALG_WEP:
		if (key->keylen == LEN_WEP40)
			ktype = AR9170_ENC_ALG_WEP64;
		else
			ktype = AR9170_ENC_ALG_WEP128;
		break;
	case ALG_TKIP:
		ktype = AR9170_ENC_ALG_TKIP;
		break;
	case ALG_CCMP:
		ktype = AR9170_ENC_ALG_AESCCMP;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mutex_lock(&ar->mutex);
	if (cmd == SET_KEY) {
		if (unlikely(!IS_STARTED(ar))) {
			err = -EOPNOTSUPP;
			goto out;
		}

		/* group keys need all-zeroes address */
		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
			sta = NULL;

		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
			/* find the first free pairwise slot (0..63) */
			for (i = 0; i < 64; i++)
				if (!(ar->usedkeys & BIT(i)))
					break;
			if (i == 64) {
				/* no slot left: fall back to software RX */
				ar->rx_software_decryption = true;
				ar9170_set_operating_mode(ar);
				err = -ENOSPC;
				goto out;
			}
		} else {
			i = 64 + key->keyidx;
		}

		key->hw_key_idx = i;

		/* upload at most 16 key bytes; TKIP MIC keys follow below */
		err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL, ktype, 0,
					key->key, min_t(u8, 16, key->keylen));
		if (err)
			goto out;

		if (key->alg == ALG_TKIP) {
			/* second upload: the 16-byte MIC key portion */
			err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL,
						ktype, 1, key->key + 16, 16);
			if (err)
				goto out;

			/*
			 * hardware is not capable generating the MMIC
			 * for fragmented frames!
			 */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		}

		/* only pairwise slots are tracked in the usedkeys bitmap */
		if (i < 64)
			ar->usedkeys |= BIT(i);

		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	} else {
		if (unlikely(!IS_STARTED(ar))) {
			/* The device is gone... together with the key ;-) */
			err = 0;
			goto out;
		}

		err = ar9170_disable_key(ar, key->hw_key_idx);
		if (err)
			goto out;

		if (key->hw_key_idx < 64) {
			ar->usedkeys &= ~BIT(key->hw_key_idx);
		} else {
			/* group keys: overwrite the slot with a null key */
			err = ar9170_upload_key(ar, key->hw_key_idx, NULL,
						AR9170_ENC_ALG_NONE, 0,
						NULL, 0);
			if (err)
				goto out;

			if (key->alg == ALG_TKIP) {
				err = ar9170_upload_key(ar, key->hw_key_idx,
							NULL,
							AR9170_ENC_ALG_NONE, 1,
							NULL, 0);
				if (err)
					goto out;
			}

		}
	}

	/* sync the hardware's roll-call table with the usedkeys bitmap */
	ar9170_regwrite_begin(ar);
	ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_L, ar->usedkeys);
	ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_H, ar->usedkeys >> 32);
	ar9170_regwrite_finish();
	err = ar9170_regwrite_result();

out:
	mutex_unlock(&ar->mutex);

	return err;
}
1408
/*
 * mac80211 sta_notify callback.
 *
 * On STA_NOTIFY_ADD, initialize the per-station tx-status queues (one
 * per hardware queue).  On STA_NOTIFY_REMOVE, move any frames still
 * awaiting a tx-status report to the global queue and schedule the
 * tx-status janitor to process them.
 */
static void ar9170_sta_notify(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      enum sta_notify_cmd cmd,
			      struct ieee80211_sta *sta)
{
	struct ar9170 *ar = hw->priv;
	struct ar9170_sta_info *info = (void *) sta->drv_priv;
	struct sk_buff *skb;
	unsigned int i;

	switch (cmd) {
	case STA_NOTIFY_ADD:
		for (i = 0; i < ar->hw->queues; i++)
			skb_queue_head_init(&info->tx_status[i]);
		break;

	case STA_NOTIFY_REMOVE:

		/*
		 * transfer all outstanding frames that need a tx_status
		 * reports to the global tx_status queue
		 */

		for (i = 0; i < ar->hw->queues; i++) {
			while ((skb = skb_dequeue(&info->tx_status[i]))) {
#ifdef AR9170_QUEUE_DEBUG
				printk(KERN_DEBUG "%s: queueing frame in "
					"global tx_status queue =>\n",
				       wiphy_name(ar->hw->wiphy));

				ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
				skb_queue_tail(&ar->global_tx_status, skb);
			}
		}
		/* let the janitor pick the frames up shortly */
		queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
				   msecs_to_jiffies(100));
		break;

	default:
		break;
	}
}
1452
1453static int ar9170_get_stats(struct ieee80211_hw *hw,
1454 struct ieee80211_low_level_stats *stats)
1455{
1456 struct ar9170 *ar = hw->priv;
1457 u32 val;
1458 int err;
1459
1460 mutex_lock(&ar->mutex);
1461 err = ar9170_read_reg(ar, AR9170_MAC_REG_TX_RETRY, &val);
1462 ar->stats.dot11ACKFailureCount += val;
1463
1464 memcpy(stats, &ar->stats, sizeof(*stats));
1465 mutex_unlock(&ar->mutex);
1466
1467 return 0;
1468}
1469
1470static int ar9170_get_tx_stats(struct ieee80211_hw *hw,
1471 struct ieee80211_tx_queue_stats *tx_stats)
1472{
1473 struct ar9170 *ar = hw->priv;
1474
1475 spin_lock_bh(&ar->tx_stats_lock);
1476 memcpy(tx_stats, ar->tx_stats, sizeof(tx_stats[0]) * hw->queues);
1477 spin_unlock_bh(&ar->tx_stats_lock);
1478
1479 return 0;
1480}
1481
1482static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
1483 const struct ieee80211_tx_queue_params *param)
1484{
1485 struct ar9170 *ar = hw->priv;
1486 int ret;
1487
1488 mutex_lock(&ar->mutex);
1489 if ((param) && !(queue > ar->hw->queues)) {
1490 memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
1491 param, sizeof(*param));
1492
1493 ret = ar9170_set_qos(ar);
1494 } else
1495 ret = -EINVAL;
1496
1497 mutex_unlock(&ar->mutex);
1498 return ret;
1499}
1500
/* mac80211 callback table for the AR9170 driver */
static const struct ieee80211_ops ar9170_ops = {
	.start			= ar9170_op_start,
	.stop			= ar9170_op_stop,
	.tx			= ar9170_op_tx,
	.add_interface		= ar9170_op_add_interface,
	.remove_interface	= ar9170_op_remove_interface,
	.config			= ar9170_op_config,
	.config_interface	= ar9170_op_config_interface,
	.configure_filter	= ar9170_op_configure_filter,
	.conf_tx		= ar9170_conf_tx,
	.bss_info_changed	= ar9170_op_bss_info_changed,
	.get_tsf		= ar9170_op_get_tsf,
	.set_key		= ar9170_set_key,
	.sta_notify		= ar9170_sta_notify,
	.get_stats		= ar9170_get_stats,
	.get_tx_stats		= ar9170_get_tx_stats,
};
1518
/*
 * Allocate and pre-initialize the shared ar9170 device structure.
 *
 * Allocates the ieee80211_hw (with priv_size bytes of bus-specific
 * private data), initializes locks, status queues and work items, and
 * performs the EEPROM-independent part of wiphy setup.  The caller
 * owns the returned structure; the EEPROM-dependent part happens in
 * ar9170_register().
 *
 * Returns the new struct ar9170 *, or ERR_PTR(-ENOMEM) on failure.
 */
void *ar9170_alloc(size_t priv_size)
{
	struct ieee80211_hw *hw;
	struct ar9170 *ar;
	int i;

	hw = ieee80211_alloc_hw(priv_size, &ar9170_ops);
	if (!hw)
		return ERR_PTR(-ENOMEM);

	ar = hw->priv;
	ar->hw = hw;

	mutex_init(&ar->mutex);
	spin_lock_init(&ar->cmdlock);
	spin_lock_init(&ar->tx_stats_lock);
	skb_queue_head_init(&ar->global_tx_status);
	skb_queue_head_init(&ar->global_tx_status_waste);
	INIT_WORK(&ar->filter_config_work, ar9170_set_filters);
	INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
	INIT_DELAYED_WORK(&ar->tx_status_janitor, ar9170_tx_status_janitor);

	/* all hw supports 2.4 GHz, so set channel to 1 by default */
	ar->channel = &ar9170_2ghz_chantable[0];

	/* first part of wiphy init */
	ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
					 BIT(NL80211_IFTYPE_WDS) |
					 BIT(NL80211_IFTYPE_ADHOC);
	ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
			 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			 IEEE80211_HW_SIGNAL_DBM |
			 IEEE80211_HW_NOISE_DBM;

	ar->hw->queues = __AR9170_NUM_TXQ;
	ar->hw->extra_tx_headroom = 8;
	ar->hw->sta_data_size = sizeof(struct ar9170_sta_info);

	ar->hw->max_rates = 1;
	ar->hw->max_rate_tries = 3;

	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */

	return ar;
}
1565
1566static int ar9170_read_eeprom(struct ar9170 *ar)
1567{
1568#define RW 8 /* number of words to read at once */
1569#define RB (sizeof(u32) * RW)
1570 DECLARE_MAC_BUF(mbuf);
1571 u8 *eeprom = (void *)&ar->eeprom;
1572 u8 *addr = ar->eeprom.mac_address;
1573 __le32 offsets[RW];
1574 int i, j, err, bands = 0;
1575
1576 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1577
1578 BUILD_BUG_ON(RB > AR9170_MAX_CMD_LEN - 4);
1579#ifndef __CHECKER__
1580 /* don't want to handle trailing remains */
1581 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1582#endif
1583
1584 for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
1585 for (j = 0; j < RW; j++)
1586 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1587 RB * i + 4 * j);
1588
1589 err = ar->exec_cmd(ar, AR9170_CMD_RREG,
1590 RB, (u8 *) &offsets,
1591 RB, eeprom + RB * i);
1592 if (err)
1593 return err;
1594 }
1595
1596#undef RW
1597#undef RB
1598
1599 if (ar->eeprom.length == cpu_to_le16(0xFFFF))
1600 return -ENODATA;
1601
1602 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1603 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar9170_band_2GHz;
1604 bands++;
1605 }
1606 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1607 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &ar9170_band_5GHz;
1608 bands++;
1609 }
1610 /*
1611 * I measured this, a bandswitch takes roughly
1612 * 135 ms and a frequency switch about 80.
1613 *
1614 * FIXME: measure these values again once EEPROM settings
1615 * are used, that will influence them!
1616 */
1617 if (bands == 2)
1618 ar->hw->channel_change_time = 135 * 1000;
1619 else
1620 ar->hw->channel_change_time = 80 * 1000;
1621
1622 /* second part of wiphy init */
1623 SET_IEEE80211_PERM_ADDR(ar->hw, addr);
1624
1625 return bands ? 0 : -EINVAL;
1626}
1627
/*
 * Final device registration: read the EEPROM (which also sets the MAC
 * address and supported bands), register with mac80211, and set up the
 * LEDs.  On LED failure the mac80211 registration is rolled back.
 *
 * Returns 0 on success or a negative error code.
 */
int ar9170_register(struct ar9170 *ar, struct device *pdev)
{
	int err;

	/* try to read EEPROM, init MAC addr */
	err = ar9170_read_eeprom(ar);
	if (err)
		goto err_out;

	err = ieee80211_register_hw(ar->hw);
	if (err)
		goto err_out;

	err = ar9170_init_leds(ar);
	if (err)
		goto err_unreg;

#ifdef CONFIG_AR9170_LEDS
	err = ar9170_register_leds(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_AR9170_LEDS */

	dev_info(pdev, "Atheros AR9170 is registered as '%s'\n",
		 wiphy_name(ar->hw->wiphy));

	return err;

err_unreg:
	ieee80211_unregister_hw(ar->hw);

err_out:
	return err;
}
1662
/*
 * Counterpart to ar9170_register(): tear down the LEDs (if built),
 * unregister from mac80211 and destroy the device mutex.  Does not
 * free the ieee80211_hw itself; that is the bus glue's job.
 */
void ar9170_unregister(struct ar9170 *ar)
{
#ifdef CONFIG_AR9170_LEDS
	ar9170_unregister_leds(ar);
#endif /* CONFIG_AR9170_LEDS */

	ieee80211_unregister_hw(ar->hw);
	mutex_destroy(&ar->mutex);
}
diff --git a/drivers/net/wireless/ar9170/phy.c b/drivers/net/wireless/ar9170/phy.c
new file mode 100644
index 000000000000..6ce20754b8e7
--- /dev/null
+++ b/drivers/net/wireless/ar9170/phy.c
@@ -0,0 +1,1240 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * PHY and RF code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#include <linux/bitrev.h>
40#include "ar9170.h"
41#include "cmd.h"
42
43static int ar9170_init_power_cal(struct ar9170 *ar)
44{
45 ar9170_regwrite_begin(ar);
46
47 ar9170_regwrite(0x1bc000 + 0x993c, 0x7f);
48 ar9170_regwrite(0x1bc000 + 0x9934, 0x3f3f3f3f);
49 ar9170_regwrite(0x1bc000 + 0x9938, 0x3f3f3f3f);
50 ar9170_regwrite(0x1bc000 + 0xa234, 0x3f3f3f3f);
51 ar9170_regwrite(0x1bc000 + 0xa238, 0x3f3f3f3f);
52 ar9170_regwrite(0x1bc000 + 0xa38c, 0x3f3f3f3f);
53 ar9170_regwrite(0x1bc000 + 0xa390, 0x3f3f3f3f);
54 ar9170_regwrite(0x1bc000 + 0xa3cc, 0x3f3f3f3f);
55 ar9170_regwrite(0x1bc000 + 0xa3d0, 0x3f3f3f3f);
56 ar9170_regwrite(0x1bc000 + 0xa3d4, 0x3f3f3f3f);
57
58 ar9170_regwrite_finish();
59 return ar9170_regwrite_result();
60}
61
/*
 * One PHY init register and its value for each of the four
 * band/bandwidth combinations named by the fields (5 GHz 20/40 MHz,
 * 2.4 GHz 40/20 MHz).
 */
struct ar9170_phy_init {
	u32 reg, _5ghz_20, _5ghz_40, _2ghz_40, _2ghz_20;
};
65
66static struct ar9170_phy_init ar5416_phy_init[] = {
67 { 0x1c5800, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
68 { 0x1c5804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, },
69 { 0x1c5808, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
70 { 0x1c580c, 0xad848e19, 0xad848e19, 0xad848e19, 0xad848e19, },
71 { 0x1c5810, 0x7d14e000, 0x7d14e000, 0x7d14e000, 0x7d14e000, },
72 { 0x1c5814, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, },
73 { 0x1c5818, 0x00000090, 0x00000090, 0x00000090, 0x00000090, },
74 { 0x1c581c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
75 { 0x1c5820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, },
76 { 0x1c5824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, },
77 { 0x1c5828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, },
78 { 0x1c582c, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, },
79 { 0x1c5830, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
80 { 0x1c5834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, },
81 { 0x1c5838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
82 { 0x1c583c, 0x00200400, 0x00200400, 0x00200400, 0x00200400, },
83 { 0x1c5840, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e, },
84 { 0x1c5844, 0x1372161e, 0x13721c1e, 0x13721c24, 0x137216a4, },
85 { 0x1c5848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, },
86 { 0x1c584c, 0x1284233c, 0x1284233c, 0x1284233c, 0x1284233c, },
87 { 0x1c5850, 0x6c48b4e4, 0x6c48b4e4, 0x6c48b0e4, 0x6c48b0e4, },
88 { 0x1c5854, 0x00000859, 0x00000859, 0x00000859, 0x00000859, },
89 { 0x1c5858, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, },
90 { 0x1c585c, 0x31395c5e, 0x31395c5e, 0x31395c5e, 0x31395c5e, },
91 { 0x1c5860, 0x0004dd10, 0x0004dd10, 0x0004dd20, 0x0004dd20, },
92 { 0x1c5868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, },
93 { 0x1c586c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, },
94 { 0x1c5900, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
95 { 0x1c5904, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
96 { 0x1c5908, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
97 { 0x1c590c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
98 { 0x1c5914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, },
99 { 0x1c5918, 0x00000118, 0x00000230, 0x00000268, 0x00000134, },
100 { 0x1c591c, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, },
101 { 0x1c5920, 0x0510081c, 0x0510081c, 0x0510001c, 0x0510001c, },
102 { 0x1c5924, 0xd0058a15, 0xd0058a15, 0xd0058a15, 0xd0058a15, },
103 { 0x1c5928, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
104 { 0x1c592c, 0x00000004, 0x00000004, 0x00000004, 0x00000004, },
105 { 0x1c5934, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
106 { 0x1c5938, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
107 { 0x1c593c, 0x0000007f, 0x0000007f, 0x0000007f, 0x0000007f, },
108 { 0x1c5944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, },
109 { 0x1c5948, 0x9280b212, 0x9280b212, 0x9280b212, 0x9280b212, },
110 { 0x1c594c, 0x00020028, 0x00020028, 0x00020028, 0x00020028, },
111 { 0x1c5954, 0x5d50e188, 0x5d50e188, 0x5d50e188, 0x5d50e188, },
112 { 0x1c5958, 0x00081fff, 0x00081fff, 0x00081fff, 0x00081fff, },
113 { 0x1c5960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
114 { 0x1c5964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, },
115 { 0x1c5970, 0x190fb515, 0x190fb515, 0x190fb515, 0x190fb515, },
116 { 0x1c5974, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
117 { 0x1c5978, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
118 { 0x1c597c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
119 { 0x1c5980, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
120 { 0x1c5984, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
121 { 0x1c5988, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
122 { 0x1c598c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
123 { 0x1c5990, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
124 { 0x1c5994, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
125 { 0x1c5998, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
126 { 0x1c599c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
127 { 0x1c59a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
128 { 0x1c59a4, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
129 { 0x1c59a8, 0x001fff00, 0x001fff00, 0x001fff00, 0x001fff00, },
130 { 0x1c59ac, 0x006f00c4, 0x006f00c4, 0x006f00c4, 0x006f00c4, },
131 { 0x1c59b0, 0x03051000, 0x03051000, 0x03051000, 0x03051000, },
132 { 0x1c59b4, 0x00000820, 0x00000820, 0x00000820, 0x00000820, },
133 { 0x1c59c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, },
134 { 0x1c59c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, },
135 { 0x1c59c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, },
136 { 0x1c59cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, },
137 { 0x1c59d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, },
138 { 0x1c59d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
139 { 0x1c59d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
140 { 0x1c59dc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
141 { 0x1c59e0, 0x00000200, 0x00000200, 0x00000200, 0x00000200, },
142 { 0x1c59e4, 0x64646464, 0x64646464, 0x64646464, 0x64646464, },
143 { 0x1c59e8, 0x3c787878, 0x3c787878, 0x3c787878, 0x3c787878, },
144 { 0x1c59ec, 0x000000aa, 0x000000aa, 0x000000aa, 0x000000aa, },
145 { 0x1c59f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
146 { 0x1c59fc, 0x00001042, 0x00001042, 0x00001042, 0x00001042, },
147 { 0x1c5a00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
148 { 0x1c5a04, 0x00000040, 0x00000040, 0x00000040, 0x00000040, },
149 { 0x1c5a08, 0x00000080, 0x00000080, 0x00000080, 0x00000080, },
150 { 0x1c5a0c, 0x000001a1, 0x000001a1, 0x00000141, 0x00000141, },
151 { 0x1c5a10, 0x000001e1, 0x000001e1, 0x00000181, 0x00000181, },
152 { 0x1c5a14, 0x00000021, 0x00000021, 0x000001c1, 0x000001c1, },
153 { 0x1c5a18, 0x00000061, 0x00000061, 0x00000001, 0x00000001, },
154 { 0x1c5a1c, 0x00000168, 0x00000168, 0x00000041, 0x00000041, },
155 { 0x1c5a20, 0x000001a8, 0x000001a8, 0x000001a8, 0x000001a8, },
156 { 0x1c5a24, 0x000001e8, 0x000001e8, 0x000001e8, 0x000001e8, },
157 { 0x1c5a28, 0x00000028, 0x00000028, 0x00000028, 0x00000028, },
158 { 0x1c5a2c, 0x00000068, 0x00000068, 0x00000068, 0x00000068, },
159 { 0x1c5a30, 0x00000189, 0x00000189, 0x000000a8, 0x000000a8, },
160 { 0x1c5a34, 0x000001c9, 0x000001c9, 0x00000169, 0x00000169, },
161 { 0x1c5a38, 0x00000009, 0x00000009, 0x000001a9, 0x000001a9, },
162 { 0x1c5a3c, 0x00000049, 0x00000049, 0x000001e9, 0x000001e9, },
163 { 0x1c5a40, 0x00000089, 0x00000089, 0x00000029, 0x00000029, },
164 { 0x1c5a44, 0x00000170, 0x00000170, 0x00000069, 0x00000069, },
165 { 0x1c5a48, 0x000001b0, 0x000001b0, 0x00000190, 0x00000190, },
166 { 0x1c5a4c, 0x000001f0, 0x000001f0, 0x000001d0, 0x000001d0, },
167 { 0x1c5a50, 0x00000030, 0x00000030, 0x00000010, 0x00000010, },
168 { 0x1c5a54, 0x00000070, 0x00000070, 0x00000050, 0x00000050, },
169 { 0x1c5a58, 0x00000191, 0x00000191, 0x00000090, 0x00000090, },
170 { 0x1c5a5c, 0x000001d1, 0x000001d1, 0x00000151, 0x00000151, },
171 { 0x1c5a60, 0x00000011, 0x00000011, 0x00000191, 0x00000191, },
172 { 0x1c5a64, 0x00000051, 0x00000051, 0x000001d1, 0x000001d1, },
173 { 0x1c5a68, 0x00000091, 0x00000091, 0x00000011, 0x00000011, },
174 { 0x1c5a6c, 0x000001b8, 0x000001b8, 0x00000051, 0x00000051, },
175 { 0x1c5a70, 0x000001f8, 0x000001f8, 0x00000198, 0x00000198, },
176 { 0x1c5a74, 0x00000038, 0x00000038, 0x000001d8, 0x000001d8, },
177 { 0x1c5a78, 0x00000078, 0x00000078, 0x00000018, 0x00000018, },
178 { 0x1c5a7c, 0x00000199, 0x00000199, 0x00000058, 0x00000058, },
179 { 0x1c5a80, 0x000001d9, 0x000001d9, 0x00000098, 0x00000098, },
180 { 0x1c5a84, 0x00000019, 0x00000019, 0x00000159, 0x00000159, },
181 { 0x1c5a88, 0x00000059, 0x00000059, 0x00000199, 0x00000199, },
182 { 0x1c5a8c, 0x00000099, 0x00000099, 0x000001d9, 0x000001d9, },
183 { 0x1c5a90, 0x000000d9, 0x000000d9, 0x00000019, 0x00000019, },
184 { 0x1c5a94, 0x000000f9, 0x000000f9, 0x00000059, 0x00000059, },
185 { 0x1c5a98, 0x000000f9, 0x000000f9, 0x00000099, 0x00000099, },
186 { 0x1c5a9c, 0x000000f9, 0x000000f9, 0x000000d9, 0x000000d9, },
187 { 0x1c5aa0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
188 { 0x1c5aa4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
189 { 0x1c5aa8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
190 { 0x1c5aac, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
191 { 0x1c5ab0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
192 { 0x1c5ab4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
193 { 0x1c5ab8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
194 { 0x1c5abc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
195 { 0x1c5ac0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
196 { 0x1c5ac4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
197 { 0x1c5ac8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
198 { 0x1c5acc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
199 { 0x1c5ad0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
200 { 0x1c5ad4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
201 { 0x1c5ad8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
202 { 0x1c5adc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
203 { 0x1c5ae0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
204 { 0x1c5ae4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
205 { 0x1c5ae8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
206 { 0x1c5aec, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
207 { 0x1c5af0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
208 { 0x1c5af4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
209 { 0x1c5af8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
210 { 0x1c5afc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
211 { 0x1c5b00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
212 { 0x1c5b04, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
213 { 0x1c5b08, 0x00000002, 0x00000002, 0x00000002, 0x00000002, },
214 { 0x1c5b0c, 0x00000003, 0x00000003, 0x00000003, 0x00000003, },
215 { 0x1c5b10, 0x00000004, 0x00000004, 0x00000004, 0x00000004, },
216 { 0x1c5b14, 0x00000005, 0x00000005, 0x00000005, 0x00000005, },
217 { 0x1c5b18, 0x00000008, 0x00000008, 0x00000008, 0x00000008, },
218 { 0x1c5b1c, 0x00000009, 0x00000009, 0x00000009, 0x00000009, },
219 { 0x1c5b20, 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, },
220 { 0x1c5b24, 0x0000000b, 0x0000000b, 0x0000000b, 0x0000000b, },
221 { 0x1c5b28, 0x0000000c, 0x0000000c, 0x0000000c, 0x0000000c, },
222 { 0x1c5b2c, 0x0000000d, 0x0000000d, 0x0000000d, 0x0000000d, },
223 { 0x1c5b30, 0x00000010, 0x00000010, 0x00000010, 0x00000010, },
224 { 0x1c5b34, 0x00000011, 0x00000011, 0x00000011, 0x00000011, },
225 { 0x1c5b38, 0x00000012, 0x00000012, 0x00000012, 0x00000012, },
226 { 0x1c5b3c, 0x00000013, 0x00000013, 0x00000013, 0x00000013, },
227 { 0x1c5b40, 0x00000014, 0x00000014, 0x00000014, 0x00000014, },
228 { 0x1c5b44, 0x00000015, 0x00000015, 0x00000015, 0x00000015, },
229 { 0x1c5b48, 0x00000018, 0x00000018, 0x00000018, 0x00000018, },
230 { 0x1c5b4c, 0x00000019, 0x00000019, 0x00000019, 0x00000019, },
231 { 0x1c5b50, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, },
232 { 0x1c5b54, 0x0000001b, 0x0000001b, 0x0000001b, 0x0000001b, },
233 { 0x1c5b58, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, },
234 { 0x1c5b5c, 0x0000001d, 0x0000001d, 0x0000001d, 0x0000001d, },
235 { 0x1c5b60, 0x00000020, 0x00000020, 0x00000020, 0x00000020, },
236 { 0x1c5b64, 0x00000021, 0x00000021, 0x00000021, 0x00000021, },
237 { 0x1c5b68, 0x00000022, 0x00000022, 0x00000022, 0x00000022, },
238 { 0x1c5b6c, 0x00000023, 0x00000023, 0x00000023, 0x00000023, },
239 { 0x1c5b70, 0x00000024, 0x00000024, 0x00000024, 0x00000024, },
240 { 0x1c5b74, 0x00000025, 0x00000025, 0x00000025, 0x00000025, },
241 { 0x1c5b78, 0x00000028, 0x00000028, 0x00000028, 0x00000028, },
242 { 0x1c5b7c, 0x00000029, 0x00000029, 0x00000029, 0x00000029, },
243 { 0x1c5b80, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a, },
244 { 0x1c5b84, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, },
245 { 0x1c5b88, 0x0000002c, 0x0000002c, 0x0000002c, 0x0000002c, },
246 { 0x1c5b8c, 0x0000002d, 0x0000002d, 0x0000002d, 0x0000002d, },
247 { 0x1c5b90, 0x00000030, 0x00000030, 0x00000030, 0x00000030, },
248 { 0x1c5b94, 0x00000031, 0x00000031, 0x00000031, 0x00000031, },
249 { 0x1c5b98, 0x00000032, 0x00000032, 0x00000032, 0x00000032, },
250 { 0x1c5b9c, 0x00000033, 0x00000033, 0x00000033, 0x00000033, },
251 { 0x1c5ba0, 0x00000034, 0x00000034, 0x00000034, 0x00000034, },
252 { 0x1c5ba4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
253 { 0x1c5ba8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
254 { 0x1c5bac, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
255 { 0x1c5bb0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
256 { 0x1c5bb4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
257 { 0x1c5bb8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
258 { 0x1c5bbc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
259 { 0x1c5bc0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
260 { 0x1c5bc4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
261 { 0x1c5bc8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
262 { 0x1c5bcc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
263 { 0x1c5bd0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
264 { 0x1c5bd4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
265 { 0x1c5bd8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
266 { 0x1c5bdc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
267 { 0x1c5be0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
268 { 0x1c5be4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
269 { 0x1c5be8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
270 { 0x1c5bec, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
271 { 0x1c5bf0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
272 { 0x1c5bf4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
273 { 0x1c5bf8, 0x00000010, 0x00000010, 0x00000010, 0x00000010, },
274 { 0x1c5bfc, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, },
275 { 0x1c5c00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
276 { 0x1c5c0c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
277 { 0x1c5c10, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
278 { 0x1c5c14, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
279 { 0x1c5c18, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
280 { 0x1c5c1c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
281 { 0x1c5c20, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
282 { 0x1c5c24, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
283 { 0x1c5c28, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
284 { 0x1c5c2c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
285 { 0x1c5c30, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
286 { 0x1c5c34, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
287 { 0x1c5c38, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
288 { 0x1c5c3c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
289 { 0x1c5cf0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
290 { 0x1c5cf4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
291 { 0x1c5cf8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
292 { 0x1c5cfc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
293 { 0x1c6200, 0x00000008, 0x00000008, 0x0000000e, 0x0000000e, },
294 { 0x1c6204, 0x00000440, 0x00000440, 0x00000440, 0x00000440, },
295 { 0x1c6208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, },
296 { 0x1c620c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
297 { 0x1c6210, 0x40806333, 0x40806333, 0x40806333, 0x40806333, },
298 { 0x1c6214, 0x00106c10, 0x00106c10, 0x00106c10, 0x00106c10, },
299 { 0x1c6218, 0x009c4060, 0x009c4060, 0x009c4060, 0x009c4060, },
300 { 0x1c621c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, },
301 { 0x1c6220, 0x018830c6, 0x018830c6, 0x018830c6, 0x018830c6, },
302 { 0x1c6224, 0x00000400, 0x00000400, 0x00000400, 0x00000400, },
303 { 0x1c6228, 0x000009b5, 0x000009b5, 0x000009b5, 0x000009b5, },
304 { 0x1c622c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
305 { 0x1c6230, 0x00000108, 0x00000210, 0x00000210, 0x00000108, },
306 { 0x1c6234, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
307 { 0x1c6238, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
308 { 0x1c623c, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, },
309 { 0x1c6240, 0x38490a20, 0x38490a20, 0x38490a20, 0x38490a20, },
310 { 0x1c6244, 0x00007bb6, 0x00007bb6, 0x00007bb6, 0x00007bb6, },
311 { 0x1c6248, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, },
312 { 0x1c624c, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
313 { 0x1c6250, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, },
314 { 0x1c6254, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
315 { 0x1c6258, 0x0cc75380, 0x0cc75380, 0x0cc75380, 0x0cc75380, },
316 { 0x1c625c, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, },
317 { 0x1c6260, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, },
318 { 0x1c6264, 0x00418a11, 0x00418a11, 0x00418a11, 0x00418a11, },
319 { 0x1c6268, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
320 { 0x1c626c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
321 { 0x1c6274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, },
322 { 0x1c6278, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
323 { 0x1c627c, 0x051701ce, 0x051701ce, 0x051701ce, 0x051701ce, },
324 { 0x1c6300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, },
325 { 0x1c6304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, },
326 { 0x1c6308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, },
327 { 0x1c630c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, },
328 { 0x1c6310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, },
329 { 0x1c6314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, },
330 { 0x1c6318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, },
331 { 0x1c631c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, },
332 { 0x1c6320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, },
333 { 0x1c6324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, },
334 { 0x1c6328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, },
335 { 0x1c632c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
336 { 0x1c6330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
337 { 0x1c6334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
338 { 0x1c6338, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
339 { 0x1c633c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
340 { 0x1c6340, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
341 { 0x1c6344, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
342 { 0x1c6348, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
343 { 0x1c634c, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
344 { 0x1c6350, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
345 { 0x1c6354, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, },
346 { 0x1c6358, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, },
347 { 0x1c6388, 0x08000000, 0x08000000, 0x08000000, 0x08000000, },
348 { 0x1c638c, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
349 { 0x1c6390, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
350 { 0x1c6394, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
351 { 0x1c6398, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce, },
352 { 0x1c639c, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
353 { 0x1c63a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
354 { 0x1c63a4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
355 { 0x1c63a8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
356 { 0x1c63ac, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
357 { 0x1c63b0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
358 { 0x1c63b4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
359 { 0x1c63b8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
360 { 0x1c63bc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
361 { 0x1c63c0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
362 { 0x1c63c4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
363 { 0x1c63c8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
364 { 0x1c63cc, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
365 { 0x1c63d0, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
366 { 0x1c63d4, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
367 { 0x1c63d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
368 { 0x1c63dc, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
369 { 0x1c63e0, 0x000000c0, 0x000000c0, 0x000000c0, 0x000000c0, },
370 { 0x1c6848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, },
371 { 0x1c6920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, },
372 { 0x1c6960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
373 { 0x1c720c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
374 { 0x1c726c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
375 { 0x1c7848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, },
376 { 0x1c7920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, },
377 { 0x1c7960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
378 { 0x1c820c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
379 { 0x1c826c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
380/* { 0x1c8864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, }, */
381 { 0x1c8864, 0x0001c600, 0x0001c600, 0x0001c600, 0x0001c600, },
382 { 0x1c895c, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, },
383 { 0x1c8968, 0x000003ce, 0x000003ce, 0x000003ce, 0x000003ce, },
384 { 0x1c89bc, 0x00181400, 0x00181400, 0x00181400, 0x00181400, },
385 { 0x1c9270, 0x00820820, 0x00820820, 0x00820820, 0x00820820, },
386 { 0x1c935c, 0x066c420f, 0x066c420f, 0x066c420f, 0x066c420f, },
387 { 0x1c9360, 0x0f282207, 0x0f282207, 0x0f282207, 0x0f282207, },
388 { 0x1c9364, 0x17601685, 0x17601685, 0x17601685, 0x17601685, },
389 { 0x1c9368, 0x1f801104, 0x1f801104, 0x1f801104, 0x1f801104, },
390 { 0x1c936c, 0x37a00c03, 0x37a00c03, 0x37a00c03, 0x37a00c03, },
391 { 0x1c9370, 0x3fc40883, 0x3fc40883, 0x3fc40883, 0x3fc40883, },
392 { 0x1c9374, 0x57c00803, 0x57c00803, 0x57c00803, 0x57c00803, },
393 { 0x1c9378, 0x5fd80682, 0x5fd80682, 0x5fd80682, 0x5fd80682, },
394 { 0x1c937c, 0x7fe00482, 0x7fe00482, 0x7fe00482, 0x7fe00482, },
395 { 0x1c9380, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, },
396 { 0x1c9384, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, }
397};
398
399int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band)
400{
401 int i, err;
402 u32 val;
403 bool is_2ghz = band == IEEE80211_BAND_2GHZ;
404 bool is_40mhz = false; /* XXX: for now */
405
406 ar9170_regwrite_begin(ar);
407
408 for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) {
409 if (is_40mhz) {
410 if (is_2ghz)
411 val = ar5416_phy_init[i]._2ghz_40;
412 else
413 val = ar5416_phy_init[i]._5ghz_40;
414 } else {
415 if (is_2ghz)
416 val = ar5416_phy_init[i]._2ghz_20;
417 else
418 val = ar5416_phy_init[i]._5ghz_20;
419 }
420
421 ar9170_regwrite(ar5416_phy_init[i].reg, val);
422 }
423
424 ar9170_regwrite_finish();
425 err = ar9170_regwrite_result();
426 if (err)
427 return err;
428
429 /* XXX: use EEPROM data here! */
430
431 err = ar9170_init_power_cal(ar);
432 if (err)
433 return err;
434
435 /* XXX: remove magic! */
436 if (is_2ghz)
437 err = ar9170_write_reg(ar, 0x1d4014, 0x5163);
438 else
439 err = ar9170_write_reg(ar, 0x1d4014, 0x5143);
440
441 return err;
442}
443
/* One RF initialisation write: register address plus a value for each
 * band (5 GHz and 2.4 GHz). */
struct ar9170_rf_init {
	u32 reg, _5ghz, _2ghz;
};
447
/*
 * RF bank 0-7 initialisation sequence, replayed by
 * ar9170_init_rf_banks_0_7().  Most banks use identical values for
 * both bands; bank 3 and one bank-6 entry differ per band.  Bank 6 is
 * a long burst of writes to the same register (0x1c58b0) -- NOTE:
 * exact hardware semantics of these values are not documented here.
 */
static struct ar9170_rf_init ar9170_rf_init[] = {
	/* bank 0 */
	{ 0x1c58b0, 0x1e5795e5, 0x1e5795e5},
	{ 0x1c58e0, 0x02008020, 0x02008020},
	/* bank 1 */
	{ 0x1c58b0, 0x02108421, 0x02108421},
	{ 0x1c58ec, 0x00000008, 0x00000008},
	/* bank 2 */
	{ 0x1c58b0, 0x0e73ff17, 0x0e73ff17},
	{ 0x1c58e0, 0x00000420, 0x00000420},
	/* bank 3 */
	{ 0x1c58f0, 0x01400018, 0x01c00018},
	/* bank 4 */
	{ 0x1c58b0, 0x000001a1, 0x000001a1},
	{ 0x1c58e8, 0x00000001, 0x00000001},
	/* bank 5 */
	{ 0x1c58b0, 0x00000013, 0x00000013},
	{ 0x1c58e4, 0x00000002, 0x00000002},
	/* bank 6 */
	{ 0x1c58b0, 0x00000000, 0x00000000},
	{ 0x1c58b0, 0x00000000, 0x00000000},
	{ 0x1c58b0, 0x00000000, 0x00000000},
	{ 0x1c58b0, 0x00000000, 0x00000000},
	{ 0x1c58b0, 0x00000000, 0x00000000},
	{ 0x1c58b0, 0x00004000, 0x00004000},
	{ 0x1c58b0, 0x00006c00, 0x00006c00},
	{ 0x1c58b0, 0x00002c00, 0x00002c00},
	{ 0x1c58b0, 0x00004800, 0x00004800},
	{ 0x1c58b0, 0x00004000, 0x00004000},
	{ 0x1c58b0, 0x00006000, 0x00006000},
	{ 0x1c58b0, 0x00001000, 0x00001000},
	{ 0x1c58b0, 0x00004000, 0x00004000},
	{ 0x1c58b0, 0x00007c00, 0x00007c00},
	{ 0x1c58b0, 0x00007c00, 0x00007c00},
	{ 0x1c58b0, 0x00007c00, 0x00007c00},
	{ 0x1c58b0, 0x00007c00, 0x00007c00},
	{ 0x1c58b0, 0x00007c00, 0x00007c00},
	{ 0x1c58b0, 0x00087c00, 0x00087c00},
	{ 0x1c58b0, 0x00007c00, 0x00007c00},
	{ 0x1c58b0, 0x00005400, 0x00005400},
	{ 0x1c58b0, 0x00000c00, 0x00000c00},
	{ 0x1c58b0, 0x00001800, 0x00001800},
	{ 0x1c58b0, 0x00007c00, 0x00007c00},
	{ 0x1c58b0, 0x00006c00, 0x00006c00},
	{ 0x1c58b0, 0x00006c00, 0x00006c00},
	{ 0x1c58b0, 0x00007c00, 0x00007c00},
	{ 0x1c58b0, 0x00002c00, 0x00002c00},
	{ 0x1c58b0, 0x00003c00, 0x00003c00},
	{ 0x1c58b0, 0x00003800, 0x00003800},
	{ 0x1c58b0, 0x00001c00, 0x00001c00},
	{ 0x1c58b0, 0x00000800, 0x00000800},
	{ 0x1c58b0, 0x00000408, 0x00000408},
	{ 0x1c58b0, 0x00004c15, 0x00004c15},
	{ 0x1c58b0, 0x00004188, 0x00004188},
	{ 0x1c58b0, 0x0000201e, 0x0000201e},
	{ 0x1c58b0, 0x00010408, 0x00010408},
	{ 0x1c58b0, 0x00000801, 0x00000801},
	{ 0x1c58b0, 0x00000c08, 0x00000c08},
	{ 0x1c58b0, 0x0000181e, 0x0000181e},
	{ 0x1c58b0, 0x00001016, 0x00001016},
	{ 0x1c58b0, 0x00002800, 0x00002800},
	{ 0x1c58b0, 0x00004010, 0x00004010},
	{ 0x1c58b0, 0x0000081c, 0x0000081c},
	{ 0x1c58b0, 0x00000115, 0x00000115},
	{ 0x1c58b0, 0x00000015, 0x00000015},
	{ 0x1c58b0, 0x00000066, 0x00000066},
	{ 0x1c58b0, 0x0000001c, 0x0000001c},
	{ 0x1c58b0, 0x00000000, 0x00000000},
	{ 0x1c58b0, 0x00000004, 0x00000004},
	{ 0x1c58b0, 0x00000015, 0x00000015},
	{ 0x1c58b0, 0x0000001f, 0x0000001f},
	{ 0x1c58e0, 0x00000000, 0x00000400},
	/* bank 7 */
	{ 0x1c58b0, 0x000000a0, 0x000000a0},
	{ 0x1c58b0, 0x00000000, 0x00000000},
	{ 0x1c58b0, 0x00000040, 0x00000040},
	{ 0x1c58f0, 0x0000001c, 0x0000001c},
};
526
527static int ar9170_init_rf_banks_0_7(struct ar9170 *ar, bool band5ghz)
528{
529 int err, i;
530
531 ar9170_regwrite_begin(ar);
532
533 for (i = 0; i < ARRAY_SIZE(ar9170_rf_init); i++)
534 ar9170_regwrite(ar9170_rf_init[i].reg,
535 band5ghz ? ar9170_rf_init[i]._5ghz
536 : ar9170_rf_init[i]._2ghz);
537
538 ar9170_regwrite_finish();
539 err = ar9170_regwrite_result();
540 if (err)
541 printk(KERN_ERR "%s: rf init failed\n",
542 wiphy_name(ar->hw->wiphy));
543 return err;
544}
545
/*
 * Program RF bank 4 for the given centre frequency and bandwidth.
 *
 * A channel-select byte and a few synthesiser flags are packed into
 * two words written to 0x1c58b0/0x1c58e8.  NOTE(review): the bit
 * layout below comes from the vendor driver and is otherwise
 * undocumented -- treat the constants as opaque.
 */
static int ar9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz,
				    u32 freq, enum ar9170_bw bw)
{
	int err;
	u32 d0, d1, td0, td1, fd0, fd1;
	u8 chansel;
	u8 refsel0 = 1, refsel1 = 0;
	u8 lf_synth = 0;

	/* for 40 MHz operation, tune 10 MHz off-centre towards the
	 * extension channel */
	switch (bw) {
	case AR9170_BW_40_ABOVE:
		freq += 10;
		break;
	case AR9170_BW_40_BELOW:
		freq -= 10;
		break;
	case AR9170_BW_20:
		break;
	case __AR9170_NUM_BW:
		BUG();
	}

	if (band5ghz) {
		if (freq % 10) {
			chansel = (freq - 4800) / 5;
		} else {
			/* multiple-of-10-MHz channels flip the
			 * reference-select flags */
			chansel = ((freq - 4800) / 10) * 2;
			refsel0 = 0;
			refsel1 = 1;
		}
		/* hardware wants the bits in reverse order */
		chansel = byte_rev_table[chansel];
	} else {
		if (freq == 2484) {
			/* channel 14 uses the low-frequency synthesiser */
			chansel = 10 + (freq - 2274) / 5;
			lf_synth = 1;
		} else
			chansel = 16 + (freq - 2272) / 5;
		chansel *= 4;
		chansel = byte_rev_table[chansel];
	}

	/* pack flags + channel select, then interleave the low 5 bits
	 * and the next 3 bits of each word into the two bank-4 values */
	d1 = chansel;
	d0 = 0x21 |
	     refsel0 << 3 |
	     refsel1 << 2 |
	     lf_synth << 1;
	td0 = d0 & 0x1f;
	td1 = d1 & 0x1f;
	fd0 = td1 << 5 | td0;

	td0 = (d0 >> 5) & 0x7;
	td1 = (d1 >> 5) & 0x7;
	fd1 = td1 << 5 | td0;

	ar9170_regwrite_begin(ar);

	ar9170_regwrite(0x1c58b0, fd0);
	ar9170_regwrite(0x1c58e8, fd1);

	ar9170_regwrite_finish();
	err = ar9170_regwrite_result();
	if (err)
		return err;

	/* give the synthesiser time to settle -- TODO confirm 10 ms
	 * is actually required */
	msleep(10);

	return 0;
}
614
/* Per-frequency PHY coefficient pair (exponent/mantissa), with a
 * second pair for the short guard interval case.  The values are
 * passed verbatim to the firmware's RF_INIT/FREQUENCY commands; their
 * exact hardware meaning is not visible in this file. */
struct ar9170_phy_freq_params {
	u8 coeff_exp;
	u16 coeff_man;
	u8 coeff_exp_shgi;
	u16 coeff_man_shgi;
};
621
/* Coefficient sets for one channel: one entry per bandwidth mode,
 * indexed by enum ar9170_bw (20 MHz, 40 MHz below, 40 MHz above). */
struct ar9170_phy_freq_entry {
	u16 freq;
	struct ar9170_phy_freq_params params[__AR9170_NUM_BW];
};
626
/*
 * Per-channel PHY coefficient table.
 *
 * NB: must be in sync with channel tables in main!  Entries are looked
 * up by ieee80211_channel->hw_value (see ar9170_get_hw_dyn_params()),
 * so POSITION in this array is what matters, not ascending frequency
 * order -- note the trailing 5170..5230 block.
 */
static const struct ar9170_phy_freq_entry ar9170_phy_freq_params[] = {
/*
 *	freq,
 *	20MHz,
 *	40MHz (below),
 *	40Mhz (above),
 */
	{ 2412, {
		{ 3, 21737, 3, 19563, },
		{ 3, 21827, 3, 19644, },
		{ 3, 21647, 3, 19482, },
	} },
	{ 2417, {
		{ 3, 21692, 3, 19523, },
		{ 3, 21782, 3, 19604, },
		{ 3, 21602, 3, 19442, },
	} },
	{ 2422, {
		{ 3, 21647, 3, 19482, },
		{ 3, 21737, 3, 19563, },
		{ 3, 21558, 3, 19402, },
	} },
	{ 2427, {
		{ 3, 21602, 3, 19442, },
		{ 3, 21692, 3, 19523, },
		{ 3, 21514, 3, 19362, },
	} },
	{ 2432, {
		{ 3, 21558, 3, 19402, },
		{ 3, 21647, 3, 19482, },
		{ 3, 21470, 3, 19323, },
	} },
	{ 2437, {
		{ 3, 21514, 3, 19362, },
		{ 3, 21602, 3, 19442, },
		{ 3, 21426, 3, 19283, },
	} },
	{ 2442, {
		{ 3, 21470, 3, 19323, },
		{ 3, 21558, 3, 19402, },
		{ 3, 21382, 3, 19244, },
	} },
	{ 2447, {
		{ 3, 21426, 3, 19283, },
		{ 3, 21514, 3, 19362, },
		{ 3, 21339, 3, 19205, },
	} },
	{ 2452, {
		{ 3, 21382, 3, 19244, },
		{ 3, 21470, 3, 19323, },
		{ 3, 21295, 3, 19166, },
	} },
	{ 2457, {
		{ 3, 21339, 3, 19205, },
		{ 3, 21426, 3, 19283, },
		{ 3, 21252, 3, 19127, },
	} },
	{ 2462, {
		{ 3, 21295, 3, 19166, },
		{ 3, 21382, 3, 19244, },
		{ 3, 21209, 3, 19088, },
	} },
	{ 2467, {
		{ 3, 21252, 3, 19127, },
		{ 3, 21339, 3, 19205, },
		{ 3, 21166, 3, 19050, },
	} },
	{ 2472, {
		{ 3, 21209, 3, 19088, },
		{ 3, 21295, 3, 19166, },
		{ 3, 21124, 3, 19011, },
	} },
	{ 2484, {
		{ 3, 21107, 3, 18996, },
		{ 3, 21192, 3, 19073, },
		{ 3, 21022, 3, 18920, },
	} },
	{ 4920, {
		{ 4, 21313, 4, 19181, },
		{ 4, 21356, 4, 19220, },
		{ 4, 21269, 4, 19142, },
	} },
	{ 4940, {
		{ 4, 21226, 4, 19104, },
		{ 4, 21269, 4, 19142, },
		{ 4, 21183, 4, 19065, },
	} },
	{ 4960, {
		{ 4, 21141, 4, 19027, },
		{ 4, 21183, 4, 19065, },
		{ 4, 21098, 4, 18988, },
	} },
	{ 4980, {
		{ 4, 21056, 4, 18950, },
		{ 4, 21098, 4, 18988, },
		{ 4, 21014, 4, 18912, },
	} },
	{ 5040, {
		{ 4, 20805, 4, 18725, },
		{ 4, 20846, 4, 18762, },
		{ 4, 20764, 4, 18687, },
	} },
	{ 5060, {
		{ 4, 20723, 4, 18651, },
		{ 4, 20764, 4, 18687, },
		{ 4, 20682, 4, 18614, },
	} },
	{ 5080, {
		{ 4, 20641, 4, 18577, },
		{ 4, 20682, 4, 18614, },
		{ 4, 20601, 4, 18541, },
	} },
	{ 5180, {
		{ 4, 20243, 4, 18219, },
		{ 4, 20282, 4, 18254, },
		{ 4, 20204, 4, 18183, },
	} },
	{ 5200, {
		{ 4, 20165, 4, 18148, },
		{ 4, 20204, 4, 18183, },
		{ 4, 20126, 4, 18114, },
	} },
	{ 5220, {
		{ 4, 20088, 4, 18079, },
		{ 4, 20126, 4, 18114, },
		{ 4, 20049, 4, 18044, },
	} },
	{ 5240, {
		{ 4, 20011, 4, 18010, },
		{ 4, 20049, 4, 18044, },
		{ 4, 19973, 4, 17976, },
	} },
	{ 5260, {
		{ 4, 19935, 4, 17941, },
		{ 4, 19973, 4, 17976, },
		{ 4, 19897, 4, 17907, },
	} },
	{ 5280, {
		{ 4, 19859, 4, 17873, },
		{ 4, 19897, 4, 17907, },
		{ 4, 19822, 4, 17840, },
	} },
	{ 5300, {
		{ 4, 19784, 4, 17806, },
		{ 4, 19822, 4, 17840, },
		{ 4, 19747, 4, 17772, },
	} },
	{ 5320, {
		{ 4, 19710, 4, 17739, },
		{ 4, 19747, 4, 17772, },
		{ 4, 19673, 4, 17706, },
	} },
	{ 5500, {
		{ 4, 19065, 4, 17159, },
		{ 4, 19100, 4, 17190, },
		{ 4, 19030, 4, 17127, },
	} },
	{ 5520, {
		{ 4, 18996, 4, 17096, },
		{ 4, 19030, 4, 17127, },
		{ 4, 18962, 4, 17065, },
	} },
	{ 5540, {
		{ 4, 18927, 4, 17035, },
		{ 4, 18962, 4, 17065, },
		{ 4, 18893, 4, 17004, },
	} },
	{ 5560, {
		{ 4, 18859, 4, 16973, },
		{ 4, 18893, 4, 17004, },
		{ 4, 18825, 4, 16943, },
	} },
	{ 5580, {
		{ 4, 18792, 4, 16913, },
		{ 4, 18825, 4, 16943, },
		{ 4, 18758, 4, 16882, },
	} },
	{ 5600, {
		{ 4, 18725, 4, 16852, },
		{ 4, 18758, 4, 16882, },
		{ 4, 18691, 4, 16822, },
	} },
	{ 5620, {
		{ 4, 18658, 4, 16792, },
		{ 4, 18691, 4, 16822, },
		{ 4, 18625, 4, 16762, },
	} },
	{ 5640, {
		{ 4, 18592, 4, 16733, },
		{ 4, 18625, 4, 16762, },
		{ 4, 18559, 4, 16703, },
	} },
	{ 5660, {
		{ 4, 18526, 4, 16673, },
		{ 4, 18559, 4, 16703, },
		{ 4, 18493, 4, 16644, },
	} },
	{ 5680, {
		{ 4, 18461, 4, 16615, },
		{ 4, 18493, 4, 16644, },
		{ 4, 18428, 4, 16586, },
	} },
	{ 5700, {
		{ 4, 18396, 4, 16556, },
		{ 4, 18428, 4, 16586, },
		{ 4, 18364, 4, 16527, },
	} },
	{ 5745, {
		{ 4, 18252, 4, 16427, },
		{ 4, 18284, 4, 16455, },
		{ 4, 18220, 4, 16398, },
	} },
	{ 5765, {
		{ 4, 18189, 5, 32740, },
		{ 4, 18220, 4, 16398, },
		{ 4, 18157, 5, 32683, },
	} },
	{ 5785, {
		{ 4, 18126, 5, 32626, },
		{ 4, 18157, 5, 32683, },
		{ 4, 18094, 5, 32570, },
	} },
	{ 5805, {
		{ 4, 18063, 5, 32514, },
		{ 4, 18094, 5, 32570, },
		{ 4, 18032, 5, 32458, },
	} },
	{ 5825, {
		{ 4, 18001, 5, 32402, },
		{ 4, 18032, 5, 32458, },
		{ 4, 17970, 5, 32347, },
	} },
	/* trailing block, deliberately out of ascending frequency
	 * order -- keep in the same position as the channel list */
	{ 5170, {
		{ 4, 20282, 4, 18254, },
		{ 4, 20321, 4, 18289, },
		{ 4, 20243, 4, 18219, },
	} },
	{ 5190, {
		{ 4, 20204, 4, 18183, },
		{ 4, 20243, 4, 18219, },
		{ 4, 20165, 4, 18148, },
	} },
	{ 5210, {
		{ 4, 20126, 4, 18114, },
		{ 4, 20165, 4, 18148, },
		{ 4, 20088, 4, 18079, },
	} },
	{ 5230, {
		{ 4, 20049, 4, 18044, },
		{ 4, 20088, 4, 18079, },
		{ 4, 20011, 4, 18010, },
	} },
};
881
882static const struct ar9170_phy_freq_params *
883ar9170_get_hw_dyn_params(struct ieee80211_channel *channel,
884 enum ar9170_bw bw)
885{
886 unsigned int chanidx = 0;
887 u16 freq = 2412;
888
889 if (channel) {
890 chanidx = channel->hw_value;
891 freq = channel->center_freq;
892 }
893
894 BUG_ON(chanidx >= ARRAY_SIZE(ar9170_phy_freq_params));
895
896 BUILD_BUG_ON(__AR9170_NUM_BW != 3);
897
898 WARN_ON(ar9170_phy_freq_params[chanidx].freq != freq);
899
900 return &ar9170_phy_freq_params[chanidx].params[bw];
901}
902
903
/*
 * First-time RF bring-up: load the static RF banks for 2.4 GHz,
 * program bank 4 for 2412 MHz / 20 MHz and issue the RF_INIT firmware
 * command.  Ends with an echo test to verify the firmware is still
 * responsive after the init.
 */
int ar9170_init_rf(struct ar9170 *ar)
{
	const struct ar9170_phy_freq_params *freqpar;
	__le32 cmd[7];
	int err;

	err = ar9170_init_rf_banks_0_7(ar, false);
	if (err)
		return err;

	err = ar9170_init_rf_bank4_pwr(ar, false, 2412, AR9170_BW_20);
	if (err)
		return err;

	freqpar = ar9170_get_hw_dyn_params(NULL, AR9170_BW_20);

	/* same payload layout as the FREQUENCY command in
	 * ar9170_set_channel(): kHz frequency, 40 MHz flag, offset
	 * field, then the four coefficients */
	cmd[0] = cpu_to_le32(2412 * 1000);
	cmd[1] = cpu_to_le32(0);
	cmd[2] = cpu_to_le32(1);
	cmd[3] = cpu_to_le32(freqpar->coeff_exp);
	cmd[4] = cpu_to_le32(freqpar->coeff_man);
	cmd[5] = cpu_to_le32(freqpar->coeff_exp_shgi);
	cmd[6] = cpu_to_le32(freqpar->coeff_man_shgi);

	/* RF_INIT echoes the command back to us */
	err = ar->exec_cmd(ar, AR9170_CMD_RF_INIT,
			   sizeof(cmd), (u8 *)cmd,
			   sizeof(cmd), (u8 *)cmd);
	if (err)
		return err;

	/* NOTE(review): 1 s settle time; origin of this value is
	 * unclear -- confirm whether it can be shortened */
	msleep(1000);

	return ar9170_echo_test(ar, 0xaabbccdd);
}
939
/*
 * Find the lower index of the calibration-frequency pair bracketing
 * @f, for use with the interpolation helpers (callers read entries
 * idx and idx + 1).  Scans downward so the result is the highest
 * entry that is still <= @f; clamps to 0 when @f is below the whole
 * table, and to nfreqs - 2 when it is above.
 */
static int ar9170_find_freq_idx(int nfreqs, u8 *freqs, u8 f)
{
	int pos;

	for (pos = nfreqs - 2; pos >= 0; pos--)
		if (f >= freqs[pos])
			break;

	return pos >= 0 ? pos : 0;
}
952
/*
 * Evaluate the line through (x1,y1)-(x2,y2) at @x with truncating
 * integer division.  Flat segments, exact endpoint hits and the
 * degenerate x1 == x2 case are answered up front, so the division
 * below can never see a zero denominator.
 */
static s32 ar9170_interpolate_s32(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
{
	s32 rise = y2 - y1;
	s32 run = x2 - x1;

	if (rise == 0)		/* horizontal: same y everywhere */
		return y1;

	if (x == x1)		/* exact hit on the left point */
		return y1;
	if (x == x2)		/* exact hit on the right point */
		return y2;

	if (run == 0)		/* degenerate segment; fall back to y1 */
		return y1;

	return y1 + ((rise * (x - x1)) / run);
}

/*
 * u8 variant, computed in 24.8 fixed point so fractional slopes are
 * preserved; the return expression rounds to nearest by adding the
 * half bit (bit FRAC-1) to the truncated integer part.
 *
 * XXX(review): this may simplify to (y + (1 << (FRAC - 1))) >> FRAC;
 * kept in the original form to preserve behaviour exactly.
 */
static u8 ar9170_interpolate_u8(u8 x, u8 x1, u8 y1, u8 x2, u8 y2)
{
#define FRAC	8
	s32 fixed = ar9170_interpolate_s32(x << FRAC,
					   x1 << FRAC, y1 << FRAC,
					   x2 << FRAC, y2 << FRAC);

	return (fixed >> FRAC) +
	       ((fixed & (1 << (FRAC - 1))) >> (FRAC - 1));
#undef FRAC
}
989
/*
 * Interpolate the EEPROM calibration target powers for @freq into the
 * ar->power_* arrays and program the ACK/CTS TX power registers.
 *
 * The EEPROM stores target powers at a handful of calibration
 * frequencies per mode; for the current channel each per-rate value
 * is linearly interpolated between the two bracketing entries.
 */
static int ar9170_set_power_cal(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
{
	struct ar9170_calibration_target_power_legacy *ctpl;
	struct ar9170_calibration_target_power_ht *ctph;
	u8 *ctpres;
	int ntargets;
	int idx, i, n;
	u8 ackpower, ackchains, f;
	u8 pwr_freqs[AR5416_MAX_NUM_TGT_PWRS];

	/* fold the MHz frequency into the EEPROM's compressed u8
	 * channel representation (2.4 GHz: offset from 2300 MHz;
	 * 5 GHz: 5 MHz steps from 4800 MHz) */
	if (freq < 3000)
		f = freq - 2300;
	else
		f = (freq - 4800)/5;

	/*
	 * cycle through the various modes
	 *
	 * legacy modes first: 5G, 2G CCK, 2G OFDM
	 */
	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0: /* 5 GHz legacy */
			ctpl = &ar->eeprom.cal_tgt_pwr_5G[0];
			ntargets = AR5416_NUM_5G_TARGET_PWRS;
			ctpres = ar->power_5G_leg;
			break;
		case 1: /* 2.4 GHz CCK */
			ctpl = &ar->eeprom.cal_tgt_pwr_2G_cck[0];
			ntargets = AR5416_NUM_2G_CCK_TARGET_PWRS;
			ctpres = ar->power_2G_cck;
			break;
		case 2: /* 2.4 GHz OFDM */
			ctpl = &ar->eeprom.cal_tgt_pwr_2G_ofdm[0];
			ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
			ctpres = ar->power_2G_ofdm;
			break;
		default:
			BUG();
		}

		/* collect the calibration frequencies; 0xff terminates
		 * the list early */
		for (n = 0; n < ntargets; n++) {
			if (ctpl[n].freq == 0xff)
				break;
			pwr_freqs[n] = ctpl[n].freq;
		}
		ntargets = n;
		idx = ar9170_find_freq_idx(ntargets, pwr_freqs, f);
		/* interpolate the 4 per-rate target powers between the
		 * two bracketing calibration points */
		for (n = 0; n < 4; n++)
			ctpres[n] = ar9170_interpolate_u8(
				f,
				ctpl[idx + 0].freq,
				ctpl[idx + 0].power[n],
				ctpl[idx + 1].freq,
				ctpl[idx + 1].power[n]);
	}

	/*
	 * HT modes now: 5G HT20, 5G HT40, 2G HT20, 2G HT40
	 */
	for (i = 0; i < 4; i++) {
		switch (i) {
		case 0: /* 5 GHz HT 20 */
			ctph = &ar->eeprom.cal_tgt_pwr_5G_ht20[0];
			ntargets = AR5416_NUM_5G_TARGET_PWRS;
			ctpres = ar->power_5G_ht20;
			break;
		case 1: /* 5 GHz HT 40 */
			ctph = &ar->eeprom.cal_tgt_pwr_5G_ht40[0];
			ntargets = AR5416_NUM_5G_TARGET_PWRS;
			ctpres = ar->power_5G_ht40;
			break;
		case 2: /* 2.4 GHz HT 20 */
			ctph = &ar->eeprom.cal_tgt_pwr_2G_ht20[0];
			ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
			ctpres = ar->power_2G_ht20;
			break;
		case 3: /* 2.4 GHz HT 40 */
			ctph = &ar->eeprom.cal_tgt_pwr_2G_ht40[0];
			ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
			ctpres = ar->power_2G_ht40;
			break;
		default:
			BUG();
		}

		/* same scheme as above, but HT entries carry 8 rates */
		for (n = 0; n < ntargets; n++) {
			if (ctph[n].freq == 0xff)
				break;
			pwr_freqs[n] = ctph[n].freq;
		}
		ntargets = n;
		idx = ar9170_find_freq_idx(ntargets, pwr_freqs, f);
		for (n = 0; n < 8; n++)
			ctpres[n] = ar9170_interpolate_u8(
				f,
				ctph[idx + 0].freq,
				ctph[idx + 0].power[n],
				ctph[idx + 1].freq,
				ctph[idx + 1].power[n]);
	}

	/* set ACK/CTS TX power */
	ar9170_regwrite_begin(ar);

	/* tx_mask != 1 presumably means more than one chain is
	 * populated -- TODO confirm against the EEPROM layout */
	if (ar->eeprom.tx_mask != 1)
		ackchains = AR9170_TX_PHY_TXCHAIN_2;
	else
		ackchains = AR9170_TX_PHY_TXCHAIN_1;

	/* ACK/CTS use the lowest legacy rate's target power (6 bits) */
	if (freq < 3000)
		ackpower = ar->power_2G_ofdm[0] & 0x3f;
	else
		ackpower = ar->power_5G_leg[0] & 0x3f;

	ar9170_regwrite(0x1c3694, ackpower << 20 | ackchains << 26);
	ar9170_regwrite(0x1c3bb4, ackpower << 5 | ackchains << 11 |
				  ackpower << 21 | ackchains << 27);

	ar9170_regwrite_finish();
	return ar9170_regwrite_result();
}
1112
/*
 * Convert the firmware's 9-bit sign/magnitude noise reading to a
 * signed dBm value: the low 8 bits halved give the magnitude, and
 * bit 8 selects the negative (one's complement) form.
 */
static int ar9170_calc_noise_dbm(u32 raw_noise)
{
	u32 mag = (raw_noise & 0xff) >> 1;

	if (raw_noise & 0x100)
		return ~mag;

	return mag;
}
1120
/*
 * Tune the device to @channel.  Depending on @rfi and whether a band
 * boundary is crossed, this performs either a full PHY/RF re-init
 * (RF_INIT firmware command) or a plain frequency change (FREQUENCY
 * command).  The command reply carries raw per-chain noise readings,
 * which are converted and cached in ar->noise[].
 */
int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
		       enum ar9170_rf_init_mode rfi, enum ar9170_bw bw)
{
	const struct ar9170_phy_freq_params *freqpar;
	u32 cmd, tmp, offs;
	__le32 vals[8];
	int i, err;
	bool bandswitch;

	/* clear BB heavy clip enable */
	err = ar9170_write_reg(ar, 0x1c59e0, 0x200);
	if (err)
		return err;

	/* may be NULL at first setup */
	if (ar->channel)
		bandswitch = ar->channel->band != channel->band;
	else
		bandswitch = true;

	/* HW workaround: 2.4 GHz-only devices force a re-init for the
	 * lowest channels -- reason not documented here */
	if (!ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] &&
	    channel->center_freq <= 2417)
		bandswitch = true;

	err = ar->exec_cmd(ar, AR9170_CMD_FREQ_START, 0, NULL, 0, NULL);
	if (err)
		return err;

	if (rfi != AR9170_RFI_NONE || bandswitch) {
		u32 val = 0x400;

		if (rfi == AR9170_RFI_COLD)
			val = 0x800;

		/* warm/cold reset BB/ADDA */
		err = ar9170_write_reg(ar, 0x1d4004, val);
		if (err)
			return err;

		err = ar9170_write_reg(ar, 0x1d4004, 0x0);
		if (err)
			return err;

		err = ar9170_init_phy(ar, channel->band);
		if (err)
			return err;

		err = ar9170_init_rf_banks_0_7(ar,
			channel->band == IEEE80211_BAND_5GHZ);
		if (err)
			return err;

		cmd = AR9170_CMD_RF_INIT;
	} else {
		cmd = AR9170_CMD_FREQUENCY;
	}

	err = ar9170_init_rf_bank4_pwr(ar,
		channel->band == IEEE80211_BAND_5GHZ,
		channel->center_freq, bw);
	if (err)
		return err;

	/* per-bandwidth value for PHY register 0x1c5804 plus the
	 * offset field passed to the firmware below */
	switch (bw) {
	case AR9170_BW_20:
		tmp = 0x240;
		offs = 0;
		break;
	case AR9170_BW_40_BELOW:
		tmp = 0x2c4;
		offs = 3;
		break;
	case AR9170_BW_40_ABOVE:
		tmp = 0x2d4;
		offs = 1;
		break;
	default:
		BUG();
		return -ENOSYS;
	}

	if (0 /* 2 streams capable */)
		tmp |= 0x100;

	err = ar9170_write_reg(ar, 0x1c5804, tmp);
	if (err)
		return err;

	err = ar9170_set_power_cal(ar, channel->center_freq, bw);
	if (err)
		return err;

	freqpar = ar9170_get_hw_dyn_params(channel, bw);

	/* command payload: kHz frequency, 40 MHz flag, offset field,
	 * the four coefficients, and a final constant whose meaning is
	 * not visible here -- NOTE(review) */
	vals[0] = cpu_to_le32(channel->center_freq * 1000);
	vals[1] = cpu_to_le32(bw == AR9170_BW_20 ? 0 : 1);
	vals[2] = cpu_to_le32(offs << 2 | 1);
	vals[3] = cpu_to_le32(freqpar->coeff_exp);
	vals[4] = cpu_to_le32(freqpar->coeff_man);
	vals[5] = cpu_to_le32(freqpar->coeff_exp_shgi);
	vals[6] = cpu_to_le32(freqpar->coeff_man_shgi);
	vals[7] = cpu_to_le32(1000);

	err = ar->exec_cmd(ar, cmd, sizeof(vals), (u8 *)vals,
			   sizeof(vals), (u8 *)vals);
	if (err)
		return err;

	/* the reply overwrote vals[]; extract the four raw 9-bit noise
	 * readings and convert them to dBm */
	for (i = 0; i < 2; i++) {
		ar->noise[i] = ar9170_calc_noise_dbm(
			(le32_to_cpu(vals[2 + i]) >> 19) & 0x1ff);

		ar->noise[i + 2] = ar9170_calc_noise_dbm(
			(le32_to_cpu(vals[5 + i]) >> 23) & 0x1ff);
	}

	ar->channel = channel;
	return 0;
}
diff --git a/drivers/net/wireless/ar9170/usb.c b/drivers/net/wireless/ar9170/usb.c
new file mode 100644
index 000000000000..ad296840893e
--- /dev/null
+++ b/drivers/net/wireless/ar9170/usb.c
@@ -0,0 +1,748 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * USB - frontend
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, Christian Lamparter <chunkeey@web.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/module.h>
41#include <linux/usb.h>
42#include <linux/firmware.h>
43#include <linux/etherdevice.h>
44#include <net/mac80211.h>
45#include "ar9170.h"
46#include "cmd.h"
47#include "hw.h"
48#include "usb.h"
49
/* Module metadata.  The two MODULE_FIRMWARE lines advertise the
 * firmware images this driver requests at runtime (presumably from
 * the probe path, which is outside this chunk). */
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless");
MODULE_FIRMWARE("ar9170-1.fw");
MODULE_FIRMWARE("ar9170-2.fw");
56
/* USB vendor/product IDs of known AR9170-based devices; exported to
 * the USB core / module loader via MODULE_DEVICE_TABLE below. */
static struct usb_device_id ar9170_usb_ids[] = {
	/* Atheros 9170 */
	{ USB_DEVICE(0x0cf3, 0x9170) },
	/* Atheros TG121N */
	{ USB_DEVICE(0x0cf3, 0x1001) },
	/* D-Link DWA 160A */
	{ USB_DEVICE(0x07d1, 0x3c10) },
	/* Netgear WNDA3100 */
	{ USB_DEVICE(0x0846, 0x9010) },
	/* Netgear WN111 v2 */
	{ USB_DEVICE(0x0846, 0x9001) },
	/* Zydas ZD1221 */
	{ USB_DEVICE(0x0ace, 0x1221) },
	/* Z-Com UB81 BG */
	{ USB_DEVICE(0x0cde, 0x0023) },
	/* Z-Com UB82 ABG */
	{ USB_DEVICE(0x0cde, 0x0026) },
	/* Arcadyan WN7512 */
	{ USB_DEVICE(0x083a, 0xf522) },
	/* Planex GWUS300 */
	{ USB_DEVICE(0x2019, 0x5304) },
	/* IO-Data WNGDNUS2 */
	{ USB_DEVICE(0x04bb, 0x093f) },

	/* terminate */
	{}
};
MODULE_DEVICE_TABLE(usb, ar9170_usb_ids);
85
86static void ar9170_usb_tx_urb_complete_free(struct urb *urb)
87{
88 struct sk_buff *skb = urb->context;
89 struct ar9170_usb *aru = (struct ar9170_usb *)
90 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
91
92 if (!aru) {
93 dev_kfree_skb_irq(skb);
94 return ;
95 }
96
97 ar9170_handle_tx_status(&aru->common, skb, false,
98 AR9170_TX_STATUS_COMPLETE);
99}
100
/* Intentionally empty completion callback -- presumably used for TX
 * URBs whose status nobody needs to observe; callers are not visible
 * in this chunk. */
static void ar9170_usb_tx_urb_complete(struct urb *urb)
{
}
104
/*
 * Completion handler for the interrupt-IN URB.  Successful transfers
 * are only hex-dumped (no real consumer yet) and the URB resubmitted;
 * fatal statuses release the 64-byte DMA buffer allocated in
 * ar9170_usb_alloc_rx_irq_urb().  The URB structure itself is not
 * freed here -- presumably handled by the alloc path's usb_free_urb()
 * and the USB core's own reference counting.
 */
static void ar9170_usb_irq_completed(struct urb *urb)
{
	struct ar9170_usb *aru = urb->context;

	switch (urb->status) {
	/* everything is fine */
	case 0:
		break;

	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;

	default:
		/* transient error: try resubmitting */
		goto resubmit;
	}

	print_hex_dump_bytes("ar9170 irq: ", DUMP_PREFIX_OFFSET,
			     urb->transfer_buffer, urb->actual_length);

resubmit:
	usb_anchor_urb(urb, &aru->rx_submitted);
	if (usb_submit_urb(urb, GFP_ATOMIC)) {
		usb_unanchor_urb(urb);
		goto free;
	}

	return;

free:
	/* size must match the usb_buffer_alloc() in the alloc path */
	usb_buffer_free(aru->udev, 64, urb->transfer_buffer, urb->transfer_dma);
}
140
/*
 * Completion handler for the bulk RX URBs.  On success the skb
 * (urb->context) is handed to the common RX path, then rewound to an
 * empty state and resubmitted for the next transfer; on fatal status,
 * or when the interface private data is already gone, the skb is
 * freed.  NOTE(review): the skb is reused after ar9170_rx(), which
 * implies ar9170_rx() does not consume it -- confirm in main.c.
 */
static void ar9170_usb_rx_completed(struct urb *urb)
{
	struct sk_buff *skb = urb->context;
	struct ar9170_usb *aru = (struct ar9170_usb *)
		usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
	int err;

	if (!aru)
		goto free;

	switch (urb->status) {
	/* everything is fine */
	case 0:
		break;

	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;

	default:
		goto resubmit;
	}

	skb_put(skb, urb->actual_length);
	ar9170_rx(&aru->common, skb);

resubmit:
	/* reset the skb to empty before handing it back to the core */
	skb_reset_tail_pointer(skb);
	skb_trim(skb, 0);

	usb_anchor_urb(urb, &aru->rx_submitted);
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err) {
		usb_unanchor_urb(urb);
		dev_kfree_skb_irq(skb);
	}

	return ;

free:
	dev_kfree_skb_irq(skb);
	return;
}
187
/*
 * Allocate an skb sized for a maximal RX transfer and attach it to
 * @urb as both transfer buffer and completion context.  Returns
 * -ENOMEM on allocation failure; the skb is later freed or reused by
 * ar9170_usb_rx_completed().
 */
static int ar9170_usb_prep_rx_urb(struct ar9170_usb *aru,
				  struct urb *urb, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE + 32, gfp);
	if (!skb)
		return -ENOMEM;

	/* reserve some space for mac80211's radiotap */
	skb_reserve(skb, 32);

	usb_fill_bulk_urb(urb, aru->udev,
			  usb_rcvbulkpipe(aru->udev, AR9170_EP_RX),
			  skb->data, min(skb_tailroom(skb),
					 AR9170_MAX_RX_BUFFER_SIZE),
			  ar9170_usb_rx_completed, skb);

	return 0;
}
208
/*
 * Allocate a 64 byte DMA-coherent buffer, arm it on the interrupt
 * endpoint (AR9170_EP_IRQ) and submit it anchored on rx_submitted.
 *
 * Returns 0 on success or a negative errno.  Our URB reference is
 * always dropped at "out"; after a successful submit the anchor and
 * the USB core hold their own references, and the buffer is freed by
 * ar9170_usb_irq_completed() when the URB finally dies.
 */
static int ar9170_usb_alloc_rx_irq_urb(struct ar9170_usb *aru)
{
	struct urb *urb = NULL;
	void *ibuf;
	int err = -ENOMEM;

	/* initialize interrupt endpoint */
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		goto out;

	ibuf = usb_buffer_alloc(aru->udev, 64, GFP_KERNEL, &urb->transfer_dma);
	if (!ibuf)
		goto out;

	/* poll interval 1; the completion handler resubmits itself */
	usb_fill_int_urb(urb, aru->udev,
			 usb_rcvintpipe(aru->udev, AR9170_EP_IRQ), ibuf,
			 64, ar9170_usb_irq_completed, aru, 1);
	/* transfer_dma is already set, skip the core's DMA mapping */
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	usb_anchor_urb(urb, &aru->rx_submitted);
	err = usb_submit_urb(urb, GFP_KERNEL);
	if (err) {
		usb_unanchor_urb(urb);
		usb_buffer_free(aru->udev, 64, urb->transfer_buffer,
				urb->transfer_dma);
	}

out:
	usb_free_urb(urb);
	return err;
}
241
242static int ar9170_usb_alloc_rx_bulk_urbs(struct ar9170_usb *aru)
243{
244 struct urb *urb;
245 int i;
246 int err = -EINVAL;
247
248 for (i = 0; i < AR9170_NUM_RX_URBS; i++) {
249 err = -ENOMEM;
250 urb = usb_alloc_urb(0, GFP_KERNEL);
251 if (!urb)
252 goto err_out;
253
254 err = ar9170_usb_prep_rx_urb(aru, urb, GFP_KERNEL);
255 if (err) {
256 usb_free_urb(urb);
257 goto err_out;
258 }
259
260 usb_anchor_urb(urb, &aru->rx_submitted);
261 err = usb_submit_urb(urb, GFP_KERNEL);
262 if (err) {
263 usb_unanchor_urb(urb);
264 dev_kfree_skb_any((void *) urb->transfer_buffer);
265 usb_free_urb(urb);
266 goto err_out;
267 }
268 usb_free_urb(urb);
269 }
270
271 /* the device now waiting for a firmware. */
272 aru->common.state = AR9170_IDLE;
273 return 0;
274
275err_out:
276
277 usb_kill_anchored_urbs(&aru->rx_submitted);
278 return err;
279}
280
281static void ar9170_usb_cancel_urbs(struct ar9170_usb *aru)
282{
283 int ret;
284
285 aru->common.state = AR9170_UNKNOWN_STATE;
286
287 usb_unlink_anchored_urbs(&aru->tx_submitted);
288
289 /* give the LED OFF command and the deauth frame a chance to air. */
290 ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted,
291 msecs_to_jiffies(100));
292 if (ret == 0)
293 dev_err(&aru->udev->dev, "kill pending tx urbs.\n");
294 usb_poison_anchored_urbs(&aru->tx_submitted);
295
296 usb_poison_anchored_urbs(&aru->rx_submitted);
297}
298
299static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
300 unsigned int plen, void *payload,
301 unsigned int outlen, void *out)
302{
303 struct ar9170_usb *aru = (void *) ar;
304 struct urb *urb = NULL;
305 unsigned long flags;
306 int err = -ENOMEM;
307
308 if (unlikely(!IS_ACCEPTING_CMD(ar)))
309 return -EPERM;
310
311 if (WARN_ON(plen > AR9170_MAX_CMD_LEN - 4))
312 return -EINVAL;
313
314 urb = usb_alloc_urb(0, GFP_ATOMIC);
315 if (unlikely(!urb))
316 goto err_free;
317
318 ar->cmdbuf[0] = cpu_to_le32(plen);
319 ar->cmdbuf[0] |= cpu_to_le32(cmd << 8);
320 /* writing multiple regs fills this buffer already */
321 if (plen && payload != (u8 *)(&ar->cmdbuf[1]))
322 memcpy(&ar->cmdbuf[1], payload, plen);
323
324 spin_lock_irqsave(&aru->common.cmdlock, flags);
325 aru->readbuf = (u8 *)out;
326 aru->readlen = outlen;
327 spin_unlock_irqrestore(&aru->common.cmdlock, flags);
328
329 usb_fill_int_urb(urb, aru->udev,
330 usb_sndbulkpipe(aru->udev, AR9170_EP_CMD),
331 aru->common.cmdbuf, plen + 4,
332 ar9170_usb_tx_urb_complete, NULL, 1);
333
334 usb_anchor_urb(urb, &aru->tx_submitted);
335 err = usb_submit_urb(urb, GFP_ATOMIC);
336 if (err) {
337 usb_unanchor_urb(urb);
338 usb_free_urb(urb);
339 goto err_unbuf;
340 }
341 usb_free_urb(urb);
342
343 err = wait_for_completion_timeout(&aru->cmd_wait, HZ);
344 if (err == 0) {
345 err = -ETIMEDOUT;
346 goto err_unbuf;
347 }
348
349 if (outlen >= 0 && aru->readlen != outlen) {
350 err = -EMSGSIZE;
351 goto err_unbuf;
352 }
353
354 return 0;
355
356err_unbuf:
357 /* Maybe the device was removed in the second we were waiting? */
358 if (IS_STARTED(ar)) {
359 dev_err(&aru->udev->dev, "no command feedback "
360 "received (%d).\n", err);
361
362 /* provide some maybe useful debug information */
363 print_hex_dump_bytes("ar9170 cmd: ", DUMP_PREFIX_NONE,
364 aru->common.cmdbuf, plen + 4);
365 dump_stack();
366 }
367
368 /* invalidate to avoid completing the next prematurely */
369 spin_lock_irqsave(&aru->common.cmdlock, flags);
370 aru->readbuf = NULL;
371 aru->readlen = 0;
372 spin_unlock_irqrestore(&aru->common.cmdlock, flags);
373
374err_free:
375
376 return err;
377}
378
379static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb,
380 bool txstatus_needed, unsigned int extra_len)
381{
382 struct ar9170_usb *aru = (struct ar9170_usb *) ar;
383 struct urb *urb;
384 int err;
385
386 if (unlikely(!IS_STARTED(ar))) {
387 /* Seriously, what were you drink... err... thinking!? */
388 return -EPERM;
389 }
390
391 urb = usb_alloc_urb(0, GFP_ATOMIC);
392 if (unlikely(!urb))
393 return -ENOMEM;
394
395 usb_fill_bulk_urb(urb, aru->udev,
396 usb_sndbulkpipe(aru->udev, AR9170_EP_TX),
397 skb->data, skb->len + extra_len, (txstatus_needed ?
398 ar9170_usb_tx_urb_complete :
399 ar9170_usb_tx_urb_complete_free), skb);
400 urb->transfer_flags |= URB_ZERO_PACKET;
401
402 usb_anchor_urb(urb, &aru->tx_submitted);
403 err = usb_submit_urb(urb, GFP_ATOMIC);
404 if (unlikely(err))
405 usb_unanchor_urb(urb);
406
407 usb_free_urb(urb);
408 return err;
409}
410
411static void ar9170_usb_callback_cmd(struct ar9170 *ar, u32 len , void *buffer)
412{
413 struct ar9170_usb *aru = (void *) ar;
414 unsigned long flags;
415 u32 in, out;
416
417 if (!buffer)
418 return ;
419
420 in = le32_to_cpup((__le32 *)buffer);
421 out = le32_to_cpu(ar->cmdbuf[0]);
422
423 /* mask off length byte */
424 out &= ~0xFF;
425
426 if (aru->readlen >= 0) {
427 /* add expected length */
428 out |= aru->readlen;
429 } else {
430 /* add obtained length */
431 out |= in & 0xFF;
432 }
433
434 /*
435 * Some commands (e.g: AR9170_CMD_FREQUENCY) have a variable response
436 * length and we cannot predict the correct length in advance.
437 * So we only check if we provided enough space for the data.
438 */
439 if (unlikely(out < in)) {
440 dev_warn(&aru->udev->dev, "received invalid command response "
441 "got %d bytes, instead of %d bytes "
442 "and the resp length is %d bytes\n",
443 in, out, len);
444 print_hex_dump_bytes("ar9170 invalid resp: ",
445 DUMP_PREFIX_OFFSET, buffer, len);
446 /*
447 * Do not complete, then the command times out,
448 * and we get a stack trace from there.
449 */
450 return ;
451 }
452
453 spin_lock_irqsave(&aru->common.cmdlock, flags);
454 if (aru->readbuf && len > 0) {
455 memcpy(aru->readbuf, buffer + 4, len - 4);
456 aru->readbuf = NULL;
457 }
458 complete(&aru->cmd_wait);
459 spin_unlock_irqrestore(&aru->common.cmdlock, flags);
460}
461
462static int ar9170_usb_upload(struct ar9170_usb *aru, const void *data,
463 size_t len, u32 addr, bool complete)
464{
465 int transfer, err;
466 u8 *buf = kmalloc(4096, GFP_KERNEL);
467
468 if (!buf)
469 return -ENOMEM;
470
471 while (len) {
472 transfer = min_t(int, len, 4096);
473 memcpy(buf, data, transfer);
474
475 err = usb_control_msg(aru->udev, usb_sndctrlpipe(aru->udev, 0),
476 0x30 /* FW DL */, 0x40 | USB_DIR_OUT,
477 addr >> 8, 0, buf, transfer, 1000);
478
479 if (err < 0) {
480 kfree(buf);
481 return err;
482 }
483
484 len -= transfer;
485 data += transfer;
486 addr += transfer;
487 }
488 kfree(buf);
489
490 if (complete) {
491 err = usb_control_msg(aru->udev, usb_sndctrlpipe(aru->udev, 0),
492 0x31 /* FW DL COMPLETE */,
493 0x40 | USB_DIR_OUT, 0, 0, NULL, 0, 5000);
494 }
495
496 return 0;
497}
498
499static int ar9170_usb_request_firmware(struct ar9170_usb *aru)
500{
501 int err = 0;
502
503 err = request_firmware(&aru->init_values, "ar9170-1.fw",
504 &aru->udev->dev);
505 if (err) {
506 dev_err(&aru->udev->dev, "file with init values not found.\n");
507 return err;
508 }
509
510 err = request_firmware(&aru->firmware, "ar9170-2.fw", &aru->udev->dev);
511 if (err) {
512 release_firmware(aru->init_values);
513 dev_err(&aru->udev->dev, "firmware file not found.\n");
514 return err;
515 }
516
517 return err;
518}
519
520static int ar9170_usb_reset(struct ar9170_usb *aru)
521{
522 int ret, lock = (aru->intf->condition != USB_INTERFACE_BINDING);
523
524 if (lock) {
525 ret = usb_lock_device_for_reset(aru->udev, aru->intf);
526 if (ret < 0) {
527 dev_err(&aru->udev->dev, "unable to lock device "
528 "for reset (%d).\n", ret);
529 return ret;
530 }
531 }
532
533 ret = usb_reset_device(aru->udev);
534 if (lock)
535 usb_unlock_device(aru->udev);
536
537 /* let it rest - for a second - */
538 msleep(1000);
539
540 return ret;
541}
542
543static int ar9170_usb_upload_firmware(struct ar9170_usb *aru)
544{
545 int err;
546
547 /* First, upload initial values to device RAM */
548 err = ar9170_usb_upload(aru, aru->init_values->data,
549 aru->init_values->size, 0x102800, false);
550 if (err) {
551 dev_err(&aru->udev->dev, "firmware part 1 "
552 "upload failed (%d).\n", err);
553 return err;
554 }
555
556 /* Then, upload the firmware itself and start it */
557 return ar9170_usb_upload(aru, aru->firmware->data, aru->firmware->size,
558 0x200000, true);
559}
560
/*
 * Program the device's USB rx stream mode registers through the
 * command channel.  The ar9170_regwrite_* helpers form a
 * begin/.../finish sequence that batches the writes - keep the order.
 */
static int ar9170_usb_init_transport(struct ar9170_usb *aru)
{
	struct ar9170 *ar = (void *) &aru->common;
	int err;

	ar9170_regwrite_begin(ar);

	/* Set USB Rx stream mode MAX packet number to 2 */
	ar9170_regwrite(AR9170_USB_REG_MAX_AGG_UPLOAD, 0x4);

	/* Set USB Rx stream mode timeout to 10us */
	ar9170_regwrite(AR9170_USB_REG_UPLOAD_TIME_CTL, 0x80);

	ar9170_regwrite_finish();

	err = ar9170_regwrite_result();
	if (err)
		dev_err(&aru->udev->dev, "USB setup failed (%d).\n", err);

	return err;
}
582
583static void ar9170_usb_stop(struct ar9170 *ar)
584{
585 struct ar9170_usb *aru = (void *) ar;
586 int ret;
587
588 if (IS_ACCEPTING_CMD(ar))
589 aru->common.state = AR9170_STOPPED;
590
591 /* lets wait a while until the tx - queues are dried out */
592 ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted,
593 msecs_to_jiffies(1000));
594 if (ret == 0)
595 dev_err(&aru->udev->dev, "kill pending tx urbs.\n");
596
597 usb_poison_anchored_urbs(&aru->tx_submitted);
598
599 /*
600 * Note:
601 * So far we freed all tx urbs, but we won't dare to touch any rx urbs.
602 * Else we would end up with a unresponsive device...
603 */
604}
605
606static int ar9170_usb_open(struct ar9170 *ar)
607{
608 struct ar9170_usb *aru = (void *) ar;
609 int err;
610
611 usb_unpoison_anchored_urbs(&aru->tx_submitted);
612 err = ar9170_usb_init_transport(aru);
613 if (err) {
614 usb_poison_anchored_urbs(&aru->tx_submitted);
615 return err;
616 }
617
618 aru->common.state = AR9170_IDLE;
619 return 0;
620}
621
/*
 * Bind to a newly plugged AR9170: allocate driver state, install the
 * transport hooks, reset the device, fetch and upload both firmware
 * images, start the irq/rx URBs and register with the ar9170 core.
 *
 * Returns 0 on success or a negative errno; the labelled error paths
 * roll back exactly what has been acquired so far.
 */
static int ar9170_usb_probe(struct usb_interface *intf,
			    const struct usb_device_id *id)
{
	struct ar9170_usb *aru;
	struct ar9170 *ar;
	struct usb_device *udev;
	int err;

	aru = ar9170_alloc(sizeof(*aru));
	if (IS_ERR(aru)) {
		err = PTR_ERR(aru);
		goto out;
	}

	udev = interface_to_usbdev(intf);
	/* balanced by usb_put_dev() on the error path and in disconnect */
	usb_get_dev(udev);
	aru->udev = udev;
	aru->intf = intf;
	ar = &aru->common;

	usb_set_intfdata(intf, aru);
	SET_IEEE80211_DEV(ar->hw, &udev->dev);

	init_usb_anchor(&aru->rx_submitted);
	init_usb_anchor(&aru->tx_submitted);
	init_completion(&aru->cmd_wait);

	/* transport hooks used by the ar9170 core */
	aru->common.stop = ar9170_usb_stop;
	aru->common.open = ar9170_usb_open;
	aru->common.tx = ar9170_usb_tx;
	aru->common.exec_cmd = ar9170_usb_exec_cmd;
	aru->common.callback_cmd = ar9170_usb_callback_cmd;

	err = ar9170_usb_reset(aru);
	if (err)
		goto err_unlock;

	err = ar9170_usb_request_firmware(aru);
	if (err)
		goto err_unlock;

	err = ar9170_usb_alloc_rx_irq_urb(aru);
	if (err)
		goto err_freefw;

	err = ar9170_usb_alloc_rx_bulk_urbs(aru);
	if (err)
		goto err_unrx;

	err = ar9170_usb_upload_firmware(aru);
	if (err) {
		/* upload failed - probe whether a firmware already answers */
		err = ar9170_echo_test(&aru->common, 0x60d43110);
		if (err) {
			/* force user intervention, by disabling the device */
			err = usb_driver_set_configuration(aru->udev, -1);
			dev_err(&aru->udev->dev, "device is in a bad state. "
						 "please reconnect it!\n");
			goto err_unrx;
		}
	}

	err = ar9170_usb_open(ar);
	if (err)
		goto err_unrx;

	err = ar9170_register(ar, &udev->dev);

	/* stop again; the core calls open() when the interface comes up */
	ar9170_usb_stop(ar);
	if (err)
		goto err_unrx;

	return 0;

err_unrx:
	ar9170_usb_cancel_urbs(aru);

err_freefw:
	release_firmware(aru->init_values);
	release_firmware(aru->firmware);

err_unlock:
	usb_set_intfdata(intf, NULL);
	usb_put_dev(udev);
	ieee80211_free_hw(ar->hw);
out:
	return err;
}
709
710static void ar9170_usb_disconnect(struct usb_interface *intf)
711{
712 struct ar9170_usb *aru = usb_get_intfdata(intf);
713
714 if (!aru)
715 return;
716
717 aru->common.state = AR9170_IDLE;
718 ar9170_unregister(&aru->common);
719 ar9170_usb_cancel_urbs(aru);
720
721 release_firmware(aru->init_values);
722 release_firmware(aru->firmware);
723
724 usb_put_dev(aru->udev);
725 usb_set_intfdata(intf, NULL);
726 ieee80211_free_hw(aru->common.hw);
727}
728
static struct usb_driver ar9170_driver = {
	.name = "ar9170usb",
	.probe = ar9170_usb_probe,
	.disconnect = ar9170_usb_disconnect,
	.id_table = ar9170_usb_ids,
	/* keep our URBs alive across unbind so the pending LED-off /
	 * deauth tx frames can drain (see ar9170_usb_cancel_urbs()) */
	.soft_unbind = 1,
};
736
/* Module entry point: register the USB driver with the USB core. */
static int __init ar9170_init(void)
{
	return usb_register(&ar9170_driver);
}

/* Module exit point: unregister the USB driver again. */
static void __exit ar9170_exit(void)
{
	usb_deregister(&ar9170_driver);
}

module_init(ar9170_init);
module_exit(ar9170_exit);
diff --git a/drivers/net/wireless/ar9170/usb.h b/drivers/net/wireless/ar9170/usb.h
new file mode 100644
index 000000000000..f5852924cd64
--- /dev/null
+++ b/drivers/net/wireless/ar9170/usb.h
@@ -0,0 +1,74 @@
1/*
2 * Atheros AR9170 USB driver
3 *
4 * Driver specific definitions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, Christian Lamparter <chunkeey@web.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39#ifndef __USB_H
40#define __USB_H
41
42#include <linux/usb.h>
43#include <linux/completion.h>
44#include <linux/spinlock.h>
45#include <linux/leds.h>
46#include <net/wireless.h>
47#include <net/mac80211.h>
48#include <linux/firmware.h>
49#include "eeprom.h"
50#include "hw.h"
51#include "ar9170.h"
52
53#define AR9170_NUM_RX_URBS 16
54
55struct firmware;
56
57struct ar9170_usb {
58 struct ar9170 common;
59 struct usb_device *udev;
60 struct usb_interface *intf;
61
62 struct usb_anchor rx_submitted;
63 struct usb_anchor tx_submitted;
64
65 spinlock_t cmdlock;
66 struct completion cmd_wait;
67 int readlen;
68 u8 *readbuf;
69
70 const struct firmware *init_values;
71 const struct firmware *firmware;
72};
73
74#endif /* __USB_H */
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c
index bfca15da6f0f..5b9f1e06ebf6 100644
--- a/drivers/net/wireless/arlan-main.c
+++ b/drivers/net/wireless/arlan-main.c
@@ -1030,7 +1030,17 @@ static int arlan_mac_addr(struct net_device *dev, void *p)
1030 return 0; 1030 return 0;
1031} 1031}
1032 1032
1033 1033static const struct net_device_ops arlan_netdev_ops = {
1034 .ndo_open = arlan_open,
1035 .ndo_stop = arlan_close,
1036 .ndo_start_xmit = arlan_tx,
1037 .ndo_get_stats = arlan_statistics,
1038 .ndo_set_multicast_list = arlan_set_multicast,
1039 .ndo_change_mtu = arlan_change_mtu,
1040 .ndo_set_mac_address = arlan_mac_addr,
1041 .ndo_tx_timeout = arlan_tx_timeout,
1042 .ndo_validate_addr = eth_validate_addr,
1043};
1034 1044
1035static int __init arlan_setup_device(struct net_device *dev, int num) 1045static int __init arlan_setup_device(struct net_device *dev, int num)
1036{ 1046{
@@ -1042,14 +1052,7 @@ static int __init arlan_setup_device(struct net_device *dev, int num)
1042 ap->conf = (struct arlan_shmem *)(ap+1); 1052 ap->conf = (struct arlan_shmem *)(ap+1);
1043 1053
1044 dev->tx_queue_len = tx_queue_len; 1054 dev->tx_queue_len = tx_queue_len;
1045 dev->open = arlan_open; 1055 dev->netdev_ops = &arlan_netdev_ops;
1046 dev->stop = arlan_close;
1047 dev->hard_start_xmit = arlan_tx;
1048 dev->get_stats = arlan_statistics;
1049 dev->set_multicast_list = arlan_set_multicast;
1050 dev->change_mtu = arlan_change_mtu;
1051 dev->set_mac_address = arlan_mac_addr;
1052 dev->tx_timeout = arlan_tx_timeout;
1053 dev->watchdog_timeo = 3*HZ; 1056 dev->watchdog_timeo = 3*HZ;
1054 1057
1055 ap->irq_test_done = 0; 1058 ap->irq_test_done = 0;
diff --git a/drivers/net/wireless/ath5k/ath5k.h b/drivers/net/wireless/ath5k/ath5k.h
index 0dc2c7321c8b..0b616e72fe05 100644
--- a/drivers/net/wireless/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath5k/ath5k.h
@@ -204,9 +204,9 @@
204#define AR5K_TUNE_CWMAX_11B 1023 204#define AR5K_TUNE_CWMAX_11B 1023
205#define AR5K_TUNE_CWMAX_XR 7 205#define AR5K_TUNE_CWMAX_XR 7
206#define AR5K_TUNE_NOISE_FLOOR -72 206#define AR5K_TUNE_NOISE_FLOOR -72
207#define AR5K_TUNE_MAX_TXPOWER 60 207#define AR5K_TUNE_MAX_TXPOWER 63
208#define AR5K_TUNE_DEFAULT_TXPOWER 30 208#define AR5K_TUNE_DEFAULT_TXPOWER 25
209#define AR5K_TUNE_TPC_TXPOWER true 209#define AR5K_TUNE_TPC_TXPOWER false
210#define AR5K_TUNE_ANT_DIVERSITY true 210#define AR5K_TUNE_ANT_DIVERSITY true
211#define AR5K_TUNE_HWTXTRIES 4 211#define AR5K_TUNE_HWTXTRIES 4
212 212
@@ -551,11 +551,11 @@ enum ath5k_pkt_type {
551 */ 551 */
552#define AR5K_TXPOWER_OFDM(_r, _v) ( \ 552#define AR5K_TXPOWER_OFDM(_r, _v) ( \
553 ((0 & 1) << ((_v) + 6)) | \ 553 ((0 & 1) << ((_v) + 6)) | \
554 (((ah->ah_txpower.txp_rates[(_r)]) & 0x3f) << (_v)) \ 554 (((ah->ah_txpower.txp_rates_power_table[(_r)]) & 0x3f) << (_v)) \
555) 555)
556 556
557#define AR5K_TXPOWER_CCK(_r, _v) ( \ 557#define AR5K_TXPOWER_CCK(_r, _v) ( \
558 (ah->ah_txpower.txp_rates[(_r)] & 0x3f) << (_v) \ 558 (ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v) \
559) 559)
560 560
561/* 561/*
@@ -1085,13 +1085,25 @@ struct ath5k_hw {
1085 struct ath5k_gain ah_gain; 1085 struct ath5k_gain ah_gain;
1086 u8 ah_offset[AR5K_MAX_RF_BANKS]; 1086 u8 ah_offset[AR5K_MAX_RF_BANKS];
1087 1087
1088
1088 struct { 1089 struct {
1089 u16 txp_pcdac[AR5K_EEPROM_POWER_TABLE_SIZE]; 1090 /* Temporary tables used for interpolation */
1090 u16 txp_rates[AR5K_MAX_RATES]; 1091 u8 tmpL[AR5K_EEPROM_N_PD_GAINS]
1091 s16 txp_min; 1092 [AR5K_EEPROM_POWER_TABLE_SIZE];
1092 s16 txp_max; 1093 u8 tmpR[AR5K_EEPROM_N_PD_GAINS]
1094 [AR5K_EEPROM_POWER_TABLE_SIZE];
1095 u8 txp_pd_table[AR5K_EEPROM_POWER_TABLE_SIZE * 2];
1096 u16 txp_rates_power_table[AR5K_MAX_RATES];
1097 u8 txp_min_idx;
1093 bool txp_tpc; 1098 bool txp_tpc;
1099 /* Values in 0.25dB units */
1100 s16 txp_min_pwr;
1101 s16 txp_max_pwr;
1102 s16 txp_offset;
1094 s16 txp_ofdm; 1103 s16 txp_ofdm;
1104 /* Values in dB units */
1105 s16 txp_cck_ofdm_pwr_delta;
1106 s16 txp_cck_ofdm_gainf_delta;
1095 } ah_txpower; 1107 } ah_txpower;
1096 1108
1097 struct { 1109 struct {
@@ -1161,6 +1173,7 @@ extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_l
1161 1173
1162/* EEPROM access functions */ 1174/* EEPROM access functions */
1163extern int ath5k_eeprom_init(struct ath5k_hw *ah); 1175extern int ath5k_eeprom_init(struct ath5k_hw *ah);
1176extern void ath5k_eeprom_detach(struct ath5k_hw *ah);
1164extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac); 1177extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
1165extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah); 1178extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
1166 1179
@@ -1256,8 +1269,8 @@ extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, unsigned int ant);
1256extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah); 1269extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah);
1257extern int ath5k_hw_phy_disable(struct ath5k_hw *ah); 1270extern int ath5k_hw_phy_disable(struct ath5k_hw *ah);
1258/* TX power setup */ 1271/* TX power setup */
1259extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, unsigned int txpower); 1272extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 ee_mode, u8 txpower);
1260extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, unsigned int power); 1273extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 ee_mode, u8 txpower);
1261 1274
1262/* 1275/*
1263 * Functions used internaly 1276 * Functions used internaly
diff --git a/drivers/net/wireless/ath5k/attach.c b/drivers/net/wireless/ath5k/attach.c
index 656cb9dc833b..70d376c63aac 100644
--- a/drivers/net/wireless/ath5k/attach.c
+++ b/drivers/net/wireless/ath5k/attach.c
@@ -341,6 +341,8 @@ void ath5k_hw_detach(struct ath5k_hw *ah)
341 if (ah->ah_rf_banks != NULL) 341 if (ah->ah_rf_banks != NULL)
342 kfree(ah->ah_rf_banks); 342 kfree(ah->ah_rf_banks);
343 343
344 ath5k_eeprom_detach(ah);
345
344 /* assume interrupts are down */ 346 /* assume interrupts are down */
345 kfree(ah); 347 kfree(ah);
346} 348}
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index cad3ccf61b00..5d57d774e466 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -685,13 +685,6 @@ ath5k_pci_resume(struct pci_dev *pdev)
685 if (err) 685 if (err)
686 return err; 686 return err;
687 687
688 /*
689 * Suspend/Resume resets the PCI configuration space, so we have to
690 * re-disable the RETRY_TIMEOUT register (0x41) to keep
691 * PCI Tx retries from interfering with C3 CPU state
692 */
693 pci_write_config_byte(pdev, 0x41, 0);
694
695 err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); 688 err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
696 if (err) { 689 if (err) {
697 ATH5K_ERR(sc, "request_irq failed\n"); 690 ATH5K_ERR(sc, "request_irq failed\n");
@@ -1095,9 +1088,18 @@ ath5k_mode_setup(struct ath5k_softc *sc)
1095static inline int 1088static inline int
1096ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix) 1089ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
1097{ 1090{
1098 WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES, 1091 int rix;
1099 "hw_rix out of bounds: %x\n", hw_rix); 1092
1100 return sc->rate_idx[sc->curband->band][hw_rix]; 1093 /* return base rate on errors */
1094 if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
1095 "hw_rix out of bounds: %x\n", hw_rix))
1096 return 0;
1097
1098 rix = sc->rate_idx[sc->curband->band][hw_rix];
1099 if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
1100 rix = 0;
1101
1102 return rix;
1101} 1103}
1102 1104
1103/***************\ 1105/***************\
@@ -1216,6 +1218,9 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1216 1218
1217 pktlen = skb->len; 1219 pktlen = skb->len;
1218 1220
1221 /* FIXME: If we are in g mode and rate is a CCK rate
1222 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
1223 * from tx power (value is in dB units already) */
1219 if (info->control.hw_key) { 1224 if (info->control.hw_key) {
1220 keyidx = info->control.hw_key->hw_key_idx; 1225 keyidx = info->control.hw_key->hw_key_idx;
1221 pktlen += info->control.hw_key->icv_len; 1226 pktlen += info->control.hw_key->icv_len;
@@ -2044,6 +2049,9 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
2044 antenna = sc->bsent & 4 ? 2 : 1; 2049 antenna = sc->bsent & 4 ? 2 : 1;
2045 } 2050 }
2046 2051
2052 /* FIXME: If we are in g mode and rate is a CCK rate
2053 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
2054 * from tx power (value is in dB units already) */
2047 ds->ds_data = bf->skbaddr; 2055 ds->ds_data = bf->skbaddr;
2048 ret = ah->ah_setup_tx_desc(ah, ds, skb->len, 2056 ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
2049 ieee80211_get_hdrlen_from_skb(skb), 2057 ieee80211_get_hdrlen_from_skb(skb),
@@ -2305,7 +2313,7 @@ ath5k_init(struct ath5k_softc *sc)
2305 sc->curband = &sc->sbands[sc->curchan->band]; 2313 sc->curband = &sc->sbands[sc->curchan->band];
2306 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | 2314 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
2307 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | 2315 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
2308 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB; 2316 AR5K_INT_FATAL | AR5K_INT_GLOBAL;
2309 ret = ath5k_reset(sc, false, false); 2317 ret = ath5k_reset(sc, false, false);
2310 if (ret) 2318 if (ret)
2311 goto done; 2319 goto done;
@@ -2554,7 +2562,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2554 if (skb_headroom(skb) < padsize) { 2562 if (skb_headroom(skb) < padsize) {
2555 ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough" 2563 ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough"
2556 " headroom to pad %d\n", hdrlen, padsize); 2564 " headroom to pad %d\n", hdrlen, padsize);
2557 return NETDEV_TX_BUSY; 2565 goto drop_packet;
2558 } 2566 }
2559 skb_push(skb, padsize); 2567 skb_push(skb, padsize);
2560 memmove(skb->data, skb->data+padsize, hdrlen); 2568 memmove(skb->data, skb->data+padsize, hdrlen);
@@ -2565,7 +2573,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2565 ATH5K_ERR(sc, "no further txbuf available, dropping packet\n"); 2573 ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
2566 spin_unlock_irqrestore(&sc->txbuflock, flags); 2574 spin_unlock_irqrestore(&sc->txbuflock, flags);
2567 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb)); 2575 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
2568 return NETDEV_TX_BUSY; 2576 goto drop_packet;
2569 } 2577 }
2570 bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list); 2578 bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
2571 list_del(&bf->list); 2579 list_del(&bf->list);
@@ -2582,10 +2590,12 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2582 list_add_tail(&bf->list, &sc->txbuf); 2590 list_add_tail(&bf->list, &sc->txbuf);
2583 sc->txbuf_len++; 2591 sc->txbuf_len++;
2584 spin_unlock_irqrestore(&sc->txbuflock, flags); 2592 spin_unlock_irqrestore(&sc->txbuflock, flags);
2585 dev_kfree_skb_any(skb); 2593 goto drop_packet;
2586 return NETDEV_TX_OK;
2587 } 2594 }
2595 return NETDEV_TX_OK;
2588 2596
2597drop_packet:
2598 dev_kfree_skb_any(skb);
2589 return NETDEV_TX_OK; 2599 return NETDEV_TX_OK;
2590} 2600}
2591 2601
@@ -2608,12 +2618,6 @@ ath5k_reset(struct ath5k_softc *sc, bool stop, bool change_channel)
2608 goto err; 2618 goto err;
2609 } 2619 }
2610 2620
2611 /*
2612 * This is needed only to setup initial state
2613 * but it's best done after a reset.
2614 */
2615 ath5k_hw_set_txpower_limit(sc->ah, 0);
2616
2617 ret = ath5k_rx_start(sc); 2621 ret = ath5k_rx_start(sc);
2618 if (ret) { 2622 if (ret) {
2619 ATH5K_ERR(sc, "can't start recv logic\n"); 2623 ATH5K_ERR(sc, "can't start recv logic\n");
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index 20e0d14b41ec..822956114cd7 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -112,7 +112,7 @@ struct ath5k_softc {
112 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 112 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
113 struct ieee80211_channel channels[ATH_CHAN_MAX]; 113 struct ieee80211_channel channels[ATH_CHAN_MAX];
114 struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES]; 114 struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
115 u8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES]; 115 s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
116 enum nl80211_iftype opmode; 116 enum nl80211_iftype opmode;
117 struct ath5k_hw *ah; /* Atheros HW */ 117 struct ath5k_hw *ah; /* Atheros HW */
118 118
diff --git a/drivers/net/wireless/ath5k/desc.c b/drivers/net/wireless/ath5k/desc.c
index b40a9287a39a..dc30a2b70a6b 100644
--- a/drivers/net/wireless/ath5k/desc.c
+++ b/drivers/net/wireless/ath5k/desc.c
@@ -194,6 +194,10 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
194 return -EINVAL; 194 return -EINVAL;
195 } 195 }
196 196
197 tx_power += ah->ah_txpower.txp_offset;
198 if (tx_power > AR5K_TUNE_MAX_TXPOWER)
199 tx_power = AR5K_TUNE_MAX_TXPOWER;
200
197 /* Clear descriptor */ 201 /* Clear descriptor */
198 memset(&desc->ud.ds_tx5212, 0, sizeof(struct ath5k_hw_5212_tx_desc)); 202 memset(&desc->ud.ds_tx5212, 0, sizeof(struct ath5k_hw_5212_tx_desc));
199 203
diff --git a/drivers/net/wireless/ath5k/eeprom.c b/drivers/net/wireless/ath5k/eeprom.c
index ac45ca47ca87..c0fb3b09ba45 100644
--- a/drivers/net/wireless/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath5k/eeprom.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org> 2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com> 3 * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
4 * Copyright (c) 2008 Felix Fietkau <nbd@openwrt.org> 4 * Copyright (c) 2008-2009 Felix Fietkau <nbd@openwrt.org>
5 * 5 *
6 * Permission to use, copy, modify, and distribute this software for any 6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above 7 * purpose with or without fee is hereby granted, provided that the above
@@ -98,11 +98,6 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
98 int ret; 98 int ret;
99 u16 val; 99 u16 val;
100 100
101 /* Initial TX thermal adjustment values */
102 ee->ee_tx_clip = 4;
103 ee->ee_pwd_84 = ee->ee_pwd_90 = 1;
104 ee->ee_gain_select = 1;
105
106 /* 101 /*
107 * Read values from EEPROM and store them in the capability structure 102 * Read values from EEPROM and store them in the capability structure
108 */ 103 */
@@ -241,22 +236,22 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
241 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff); 236 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff);
242 switch(mode) { 237 switch(mode) {
243 case AR5K_EEPROM_MODE_11A: 238 case AR5K_EEPROM_MODE_11A:
244 ee->ee_ob[mode][3] = (val >> 5) & 0x7; 239 ee->ee_ob[mode][3] = (val >> 5) & 0x7;
245 ee->ee_db[mode][3] = (val >> 2) & 0x7; 240 ee->ee_db[mode][3] = (val >> 2) & 0x7;
246 ee->ee_ob[mode][2] = (val << 1) & 0x7; 241 ee->ee_ob[mode][2] = (val << 1) & 0x7;
247 242
248 AR5K_EEPROM_READ(o++, val); 243 AR5K_EEPROM_READ(o++, val);
249 ee->ee_ob[mode][2] |= (val >> 15) & 0x1; 244 ee->ee_ob[mode][2] |= (val >> 15) & 0x1;
250 ee->ee_db[mode][2] = (val >> 12) & 0x7; 245 ee->ee_db[mode][2] = (val >> 12) & 0x7;
251 ee->ee_ob[mode][1] = (val >> 9) & 0x7; 246 ee->ee_ob[mode][1] = (val >> 9) & 0x7;
252 ee->ee_db[mode][1] = (val >> 6) & 0x7; 247 ee->ee_db[mode][1] = (val >> 6) & 0x7;
253 ee->ee_ob[mode][0] = (val >> 3) & 0x7; 248 ee->ee_ob[mode][0] = (val >> 3) & 0x7;
254 ee->ee_db[mode][0] = val & 0x7; 249 ee->ee_db[mode][0] = val & 0x7;
255 break; 250 break;
256 case AR5K_EEPROM_MODE_11G: 251 case AR5K_EEPROM_MODE_11G:
257 case AR5K_EEPROM_MODE_11B: 252 case AR5K_EEPROM_MODE_11B:
258 ee->ee_ob[mode][1] = (val >> 4) & 0x7; 253 ee->ee_ob[mode][1] = (val >> 4) & 0x7;
259 ee->ee_db[mode][1] = val & 0x7; 254 ee->ee_db[mode][1] = val & 0x7;
260 break; 255 break;
261 } 256 }
262 257
@@ -504,35 +499,6 @@ ath5k_eeprom_init_modes(struct ath5k_hw *ah)
504 return 0; 499 return 0;
505} 500}
506 501
507/* Used to match PCDAC steps with power values on RF5111 chips
508 * (eeprom versions < 4). For RF5111 we have 10 pre-defined PCDAC
509 * steps that match with the power values we read from eeprom. On
510 * older eeprom versions (< 3.2) these steps are equaly spaced at
511 * 10% of the pcdac curve -until the curve reaches it's maximum-
512 * (10 steps from 0 to 100%) but on newer eeprom versions (>= 3.2)
513 * these 10 steps are spaced in a different way. This function returns
514 * the pcdac steps based on eeprom version and curve min/max so that we
515 * can have pcdac/pwr points.
516 */
517static inline void
518ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp)
519{
520 static const u16 intercepts3[] =
521 { 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100 };
522 static const u16 intercepts3_2[] =
523 { 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 };
524 const u16 *ip;
525 int i;
526
527 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_2)
528 ip = intercepts3_2;
529 else
530 ip = intercepts3;
531
532 for (i = 0; i < ARRAY_SIZE(intercepts3); i++)
533 *vp++ = (ip[i] * max + (100 - ip[i]) * min) / 100;
534}
535
536/* Read the frequency piers for each mode (mostly used on newer eeproms with 0xff 502/* Read the frequency piers for each mode (mostly used on newer eeproms with 0xff
537 * frequency mask) */ 503 * frequency mask) */
538static inline int 504static inline int
@@ -546,26 +512,25 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
546 int ret; 512 int ret;
547 u16 val; 513 u16 val;
548 514
515 ee->ee_n_piers[mode] = 0;
549 while(i < max) { 516 while(i < max) {
550 AR5K_EEPROM_READ(o++, val); 517 AR5K_EEPROM_READ(o++, val);
551 518
552 freq1 = (val >> 8) & 0xff; 519 freq1 = val & 0xff;
553 freq2 = val & 0xff; 520 if (!freq1)
554 521 break;
555 if (freq1) {
556 pc[i++].freq = ath5k_eeprom_bin2freq(ee,
557 freq1, mode);
558 ee->ee_n_piers[mode]++;
559 }
560 522
561 if (freq2) { 523 pc[i++].freq = ath5k_eeprom_bin2freq(ee,
562 pc[i++].freq = ath5k_eeprom_bin2freq(ee, 524 freq1, mode);
563 freq2, mode); 525 ee->ee_n_piers[mode]++;
564 ee->ee_n_piers[mode]++;
565 }
566 526
567 if (!freq1 || !freq2) 527 freq2 = (val >> 8) & 0xff;
528 if (!freq2)
568 break; 529 break;
530
531 pc[i++].freq = ath5k_eeprom_bin2freq(ee,
532 freq2, mode);
533 ee->ee_n_piers[mode]++;
569 } 534 }
570 535
571 /* return new offset */ 536 /* return new offset */
@@ -652,13 +617,122 @@ ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
652 return 0; 617 return 0;
653} 618}
654 619
655/* Read power calibration for RF5111 chips 620/*
621 * Read power calibration for RF5111 chips
622 *
656 * For RF5111 we have an XPD -eXternal Power Detector- curve 623 * For RF5111 we have an XPD -eXternal Power Detector- curve
657 * for each calibrated channel. Each curve has PCDAC steps on 624 * for each calibrated channel. Each curve has 0,5dB Power steps
658 * x axis and power on y axis and looks like a logarithmic 625 * on x axis and PCDAC steps (offsets) on y axis and looks like an
659 * function. To recreate the curve and pass the power values 626 * exponential function. To recreate the curve we read 11 points
660 * on the pcdac table, we read 10 points here and interpolate later. 627 * here and interpolate later.
661 */ 628 */
629
630/* Used to match PCDAC steps with power values on RF5111 chips
631 * (eeprom versions < 4). For RF5111 we have 11 pre-defined PCDAC
632 * steps that match with the power values we read from eeprom. On
633 * older eeprom versions (< 3.2) these steps are equaly spaced at
634 * 10% of the pcdac curve -until the curve reaches it's maximum-
635 * (11 steps from 0 to 100%) but on newer eeprom versions (>= 3.2)
636 * these 11 steps are spaced in a different way. This function returns
637 * the pcdac steps based on eeprom version and curve min/max so that we
638 * can have pcdac/pwr points.
639 */
640static inline void
641ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp)
642{
643 const static u16 intercepts3[] =
644 { 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100 };
645 const static u16 intercepts3_2[] =
646 { 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 };
647 const u16 *ip;
648 int i;
649
650 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_2)
651 ip = intercepts3_2;
652 else
653 ip = intercepts3;
654
655 for (i = 0; i < ARRAY_SIZE(intercepts3); i++)
656 vp[i] = (ip[i] * max + (100 - ip[i]) * min) / 100;
657}
658
659/* Convert RF5111 specific data to generic raw data
660 * used by interpolation code */
661static int
662ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode,
663 struct ath5k_chan_pcal_info *chinfo)
664{
665 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
666 struct ath5k_chan_pcal_info_rf5111 *pcinfo;
667 struct ath5k_pdgain_info *pd;
668 u8 pier, point, idx;
669 u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
670
671 /* Fill raw data for each calibration pier */
672 for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
673
674 pcinfo = &chinfo[pier].rf5111_info;
675
676 /* Allocate pd_curves for this cal pier */
677 chinfo[pier].pd_curves =
678 kcalloc(AR5K_EEPROM_N_PD_CURVES,
679 sizeof(struct ath5k_pdgain_info),
680 GFP_KERNEL);
681
682 if (!chinfo[pier].pd_curves)
683 return -ENOMEM;
684
685 /* Only one curve for RF5111
686 * find out which one and place
687 * in in pd_curves.
688 * Note: ee_x_gain is reversed here */
689 for (idx = 0; idx < AR5K_EEPROM_N_PD_CURVES; idx++) {
690
691 if (!((ee->ee_x_gain[mode] >> idx) & 0x1)) {
692 pdgain_idx[0] = idx;
693 break;
694 }
695 }
696
697 ee->ee_pd_gains[mode] = 1;
698
699 pd = &chinfo[pier].pd_curves[idx];
700
701 pd->pd_points = AR5K_EEPROM_N_PWR_POINTS_5111;
702
703 /* Allocate pd points for this curve */
704 pd->pd_step = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111,
705 sizeof(u8), GFP_KERNEL);
706 if (!pd->pd_step)
707 return -ENOMEM;
708
709 pd->pd_pwr = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111,
710 sizeof(s16), GFP_KERNEL);
711 if (!pd->pd_pwr)
712 return -ENOMEM;
713
714 /* Fill raw dataset
715 * (convert power to 0.25dB units
716 * for RF5112 combatibility) */
717 for (point = 0; point < pd->pd_points; point++) {
718
719 /* Absolute values */
720 pd->pd_pwr[point] = 2 * pcinfo->pwr[point];
721
722 /* Already sorted */
723 pd->pd_step[point] = pcinfo->pcdac[point];
724 }
725
726 /* Set min/max pwr */
727 chinfo[pier].min_pwr = pd->pd_pwr[0];
728 chinfo[pier].max_pwr = pd->pd_pwr[10];
729
730 }
731
732 return 0;
733}
734
735/* Parse EEPROM data */
662static int 736static int
663ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode) 737ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode)
664{ 738{
@@ -747,30 +821,165 @@ ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode)
747 cdata->pcdac_max, cdata->pcdac); 821 cdata->pcdac_max, cdata->pcdac);
748 } 822 }
749 823
750 return 0; 824 return ath5k_eeprom_convert_pcal_info_5111(ah, mode, pcal);
751} 825}
752 826
753/* Read power calibration for RF5112 chips 827
828/*
829 * Read power calibration for RF5112 chips
830 *
754 * For RF5112 we have 4 XPD -eXternal Power Detector- curves 831 * For RF5112 we have 4 XPD -eXternal Power Detector- curves
755 * for each calibrated channel on 0, -6, -12 and -18dbm but we only 832 * for each calibrated channel on 0, -6, -12 and -18dbm but we only
756 * use the higher (3) and the lower (0) curves. Each curve has PCDAC 833 * use the higher (3) and the lower (0) curves. Each curve has 0.5dB
757 * steps on x axis and power on y axis and looks like a linear 834 * power steps on x axis and PCDAC steps on y axis and looks like a
758 * function. To recreate the curve and pass the power values 835 * linear function. To recreate the curve and pass the power values
759 * on the pcdac table, we read 4 points for xpd 0 and 3 points 836 * on hw, we read 4 points for xpd 0 (lower gain -> max power)
760 * for xpd 3 here and interpolate later. 837 * and 3 points for xpd 3 (higher gain -> lower power) here and
838 * interpolate later.
761 * 839 *
762 * Note: Many vendors just use xpd 0 so xpd 3 is zeroed. 840 * Note: Many vendors just use xpd 0 so xpd 3 is zeroed.
763 */ 841 */
842
843/* Convert RF5112 specific data to generic raw data
844 * used by interpolation code */
845static int
846ath5k_eeprom_convert_pcal_info_5112(struct ath5k_hw *ah, int mode,
847 struct ath5k_chan_pcal_info *chinfo)
848{
849 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
850 struct ath5k_chan_pcal_info_rf5112 *pcinfo;
851 u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
852 unsigned int pier, pdg, point;
853
854 /* Fill raw data for each calibration pier */
855 for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
856
857 pcinfo = &chinfo[pier].rf5112_info;
858
859 /* Allocate pd_curves for this cal pier */
860 chinfo[pier].pd_curves =
861 kcalloc(AR5K_EEPROM_N_PD_CURVES,
862 sizeof(struct ath5k_pdgain_info),
863 GFP_KERNEL);
864
865 if (!chinfo[pier].pd_curves)
866 return -ENOMEM;
867
868 /* Fill pd_curves */
869 for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
870
871 u8 idx = pdgain_idx[pdg];
872 struct ath5k_pdgain_info *pd =
873 &chinfo[pier].pd_curves[idx];
874
875 /* Lowest gain curve (max power) */
876 if (pdg == 0) {
877 /* One more point for better accuracy */
878 pd->pd_points = AR5K_EEPROM_N_XPD0_POINTS;
879
880 /* Allocate pd points for this curve */
881 pd->pd_step = kcalloc(pd->pd_points,
882 sizeof(u8), GFP_KERNEL);
883
884 if (!pd->pd_step)
885 return -ENOMEM;
886
887 pd->pd_pwr = kcalloc(pd->pd_points,
888 sizeof(s16), GFP_KERNEL);
889
890 if (!pd->pd_pwr)
891 return -ENOMEM;
892
893
894 /* Fill raw dataset
895 * (all power levels are in 0.25dB units) */
896 pd->pd_step[0] = pcinfo->pcdac_x0[0];
897 pd->pd_pwr[0] = pcinfo->pwr_x0[0];
898
899 for (point = 1; point < pd->pd_points;
900 point++) {
901 /* Absolute values */
902 pd->pd_pwr[point] =
903 pcinfo->pwr_x0[point];
904
905 /* Deltas */
906 pd->pd_step[point] =
907 pd->pd_step[point - 1] +
908 pcinfo->pcdac_x0[point];
909 }
910
911 /* Set min power for this frequency */
912 chinfo[pier].min_pwr = pd->pd_pwr[0];
913
914 /* Highest gain curve (min power) */
915 } else if (pdg == 1) {
916
917 pd->pd_points = AR5K_EEPROM_N_XPD3_POINTS;
918
919 /* Allocate pd points for this curve */
920 pd->pd_step = kcalloc(pd->pd_points,
921 sizeof(u8), GFP_KERNEL);
922
923 if (!pd->pd_step)
924 return -ENOMEM;
925
926 pd->pd_pwr = kcalloc(pd->pd_points,
927 sizeof(s16), GFP_KERNEL);
928
929 if (!pd->pd_pwr)
930 return -ENOMEM;
931
932 /* Fill raw dataset
933 * (all power levels are in 0.25dB units) */
934 for (point = 0; point < pd->pd_points;
935 point++) {
936 /* Absolute values */
937 pd->pd_pwr[point] =
938 pcinfo->pwr_x3[point];
939
940 /* Fixed points */
941 pd->pd_step[point] =
942 pcinfo->pcdac_x3[point];
943 }
944
945 /* Since we have a higher gain curve
946 * override min power */
947 chinfo[pier].min_pwr = pd->pd_pwr[0];
948 }
949 }
950 }
951
952 return 0;
953}
954
955/* Parse EEPROM data */
764static int 956static int
765ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode) 957ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
766{ 958{
767 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 959 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
768 struct ath5k_chan_pcal_info_rf5112 *chan_pcal_info; 960 struct ath5k_chan_pcal_info_rf5112 *chan_pcal_info;
769 struct ath5k_chan_pcal_info *gen_chan_info; 961 struct ath5k_chan_pcal_info *gen_chan_info;
962 u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
770 u32 offset; 963 u32 offset;
771 unsigned int i, c; 964 u8 i, c;
772 u16 val; 965 u16 val;
773 int ret; 966 int ret;
967 u8 pd_gains = 0;
968
969 /* Count how many curves we have and
970 * identify them (which one of the 4
971 * available curves we have on each count).
972 * Curves are stored from lower (x0) to
973 * higher (x3) gain */
974 for (i = 0; i < AR5K_EEPROM_N_PD_CURVES; i++) {
975 /* ee_x_gain[mode] is x gain mask */
976 if ((ee->ee_x_gain[mode] >> i) & 0x1)
977 pdgain_idx[pd_gains++] = i;
978 }
979 ee->ee_pd_gains[mode] = pd_gains;
980
981 if (pd_gains == 0 || pd_gains > 2)
982 return -EINVAL;
774 983
775 switch (mode) { 984 switch (mode) {
776 case AR5K_EEPROM_MODE_11A: 985 case AR5K_EEPROM_MODE_11A:
@@ -808,13 +1017,13 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
808 for (i = 0; i < ee->ee_n_piers[mode]; i++) { 1017 for (i = 0; i < ee->ee_n_piers[mode]; i++) {
809 chan_pcal_info = &gen_chan_info[i].rf5112_info; 1018 chan_pcal_info = &gen_chan_info[i].rf5112_info;
810 1019
811 /* Power values in dBm * 4 1020 /* Power values in quarter dB
812 * for the lower xpd gain curve 1021 * for the lower xpd gain curve
813 * (0 dBm -> higher output power) */ 1022 * (0 dBm -> higher output power) */
814 for (c = 0; c < AR5K_EEPROM_N_XPD0_POINTS; c++) { 1023 for (c = 0; c < AR5K_EEPROM_N_XPD0_POINTS; c++) {
815 AR5K_EEPROM_READ(offset++, val); 1024 AR5K_EEPROM_READ(offset++, val);
816 chan_pcal_info->pwr_x0[c] = (val & 0xff); 1025 chan_pcal_info->pwr_x0[c] = (s8) (val & 0xff);
817 chan_pcal_info->pwr_x0[++c] = ((val >> 8) & 0xff); 1026 chan_pcal_info->pwr_x0[++c] = (s8) ((val >> 8) & 0xff);
818 } 1027 }
819 1028
820 /* PCDAC steps 1029 /* PCDAC steps
@@ -825,12 +1034,12 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
825 chan_pcal_info->pcdac_x0[2] = ((val >> 5) & 0x1f); 1034 chan_pcal_info->pcdac_x0[2] = ((val >> 5) & 0x1f);
826 chan_pcal_info->pcdac_x0[3] = ((val >> 10) & 0x1f); 1035 chan_pcal_info->pcdac_x0[3] = ((val >> 10) & 0x1f);
827 1036
828 /* Power values in dBm * 4 1037 /* Power values in quarter dB
829 * for the higher xpd gain curve 1038 * for the higher xpd gain curve
830 * (18 dBm -> lower output power) */ 1039 * (18 dBm -> lower output power) */
831 AR5K_EEPROM_READ(offset++, val); 1040 AR5K_EEPROM_READ(offset++, val);
832 chan_pcal_info->pwr_x3[0] = (val & 0xff); 1041 chan_pcal_info->pwr_x3[0] = (s8) (val & 0xff);
833 chan_pcal_info->pwr_x3[1] = ((val >> 8) & 0xff); 1042 chan_pcal_info->pwr_x3[1] = (s8) ((val >> 8) & 0xff);
834 1043
835 AR5K_EEPROM_READ(offset++, val); 1044 AR5K_EEPROM_READ(offset++, val);
836 chan_pcal_info->pwr_x3[2] = (val & 0xff); 1045 chan_pcal_info->pwr_x3[2] = (val & 0xff);
@@ -843,24 +1052,36 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
843 chan_pcal_info->pcdac_x3[2] = 63; 1052 chan_pcal_info->pcdac_x3[2] = 63;
844 1053
845 if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3) { 1054 if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3) {
846 chan_pcal_info->pcdac_x0[0] = ((val >> 8) & 0xff); 1055 chan_pcal_info->pcdac_x0[0] = ((val >> 8) & 0x3f);
847 1056
848 /* Last xpd0 power level is also channel maximum */ 1057 /* Last xpd0 power level is also channel maximum */
849 gen_chan_info[i].max_pwr = chan_pcal_info->pwr_x0[3]; 1058 gen_chan_info[i].max_pwr = chan_pcal_info->pwr_x0[3];
850 } else { 1059 } else {
851 chan_pcal_info->pcdac_x0[0] = 1; 1060 chan_pcal_info->pcdac_x0[0] = 1;
852 gen_chan_info[i].max_pwr = ((val >> 8) & 0xff); 1061 gen_chan_info[i].max_pwr = (s8) ((val >> 8) & 0xff);
853 } 1062 }
854 1063
855 /* Recreate pcdac_x0 table for this channel using pcdac steps */
856 chan_pcal_info->pcdac_x0[1] += chan_pcal_info->pcdac_x0[0];
857 chan_pcal_info->pcdac_x0[2] += chan_pcal_info->pcdac_x0[1];
858 chan_pcal_info->pcdac_x0[3] += chan_pcal_info->pcdac_x0[2];
859 } 1064 }
860 1065
861 return 0; 1066 return ath5k_eeprom_convert_pcal_info_5112(ah, mode, gen_chan_info);
862} 1067}
863 1068
1069
1070/*
1071 * Read power calibration for RF2413 chips
1072 *
1073 * For RF2413 we have a Power to PDDAC table (Power Detector)
1074 * instead of a PCDAC and 4 pd gain curves for each calibrated channel.
1075 * Each curve has power on x axis in 0.5 db steps and PDDADC steps on y
1076 * axis and looks like an exponential function like the RF5111 curve.
1077 *
1078 * To recreate the curves we read here the points and interpolate
1079 * later. Note that in most cases only 2 (higher and lower) curves are
1080 * used (like RF5112) but vendors have the oportunity to include all
1081 * 4 curves on eeprom. The final curve (higher power) has an extra
1082 * point for better accuracy like RF5112.
1083 */
1084
864/* For RF2413 power calibration data doesn't start on a fixed location and 1085/* For RF2413 power calibration data doesn't start on a fixed location and
865 * if a mode is not supported, it's section is missing -not zeroed-. 1086 * if a mode is not supported, it's section is missing -not zeroed-.
866 * So we need to calculate the starting offset for each section by using 1087 * So we need to calculate the starting offset for each section by using
@@ -890,13 +1111,15 @@ ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode)
890 switch(mode) { 1111 switch(mode) {
891 case AR5K_EEPROM_MODE_11G: 1112 case AR5K_EEPROM_MODE_11G:
892 if (AR5K_EEPROM_HDR_11B(ee->ee_header)) 1113 if (AR5K_EEPROM_HDR_11B(ee->ee_header))
893 offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11B) + 1114 offset += ath5k_pdgains_size_2413(ee,
894 AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; 1115 AR5K_EEPROM_MODE_11B) +
1116 AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
895 /* fall through */ 1117 /* fall through */
896 case AR5K_EEPROM_MODE_11B: 1118 case AR5K_EEPROM_MODE_11B:
897 if (AR5K_EEPROM_HDR_11A(ee->ee_header)) 1119 if (AR5K_EEPROM_HDR_11A(ee->ee_header))
898 offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11A) + 1120 offset += ath5k_pdgains_size_2413(ee,
899 AR5K_EEPROM_N_5GHZ_CHAN / 2; 1121 AR5K_EEPROM_MODE_11A) +
1122 AR5K_EEPROM_N_5GHZ_CHAN / 2;
900 /* fall through */ 1123 /* fall through */
901 case AR5K_EEPROM_MODE_11A: 1124 case AR5K_EEPROM_MODE_11A:
902 break; 1125 break;
@@ -907,37 +1130,118 @@ ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode)
907 return offset; 1130 return offset;
908} 1131}
909 1132
910/* Read power calibration for RF2413 chips 1133/* Convert RF2413 specific data to generic raw data
911 * For RF2413 we have a PDDAC table (Power Detector) instead 1134 * used by interpolation code */
912 * of a PCDAC and 4 pd gain curves for each calibrated channel. 1135static int
913 * Each curve has PDDAC steps on x axis and power on y axis and 1136ath5k_eeprom_convert_pcal_info_2413(struct ath5k_hw *ah, int mode,
914 * looks like an exponential function. To recreate the curves 1137 struct ath5k_chan_pcal_info *chinfo)
915 * we read here the points and interpolate later. Note that 1138{
916 * in most cases only higher and lower curves are used (like 1139 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
917 * RF5112) but vendors have the oportunity to include all 4 1140 struct ath5k_chan_pcal_info_rf2413 *pcinfo;
918 * curves on eeprom. The final curve (higher power) has an extra 1141 u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
919 * point for better accuracy like RF5112. 1142 unsigned int pier, pdg, point;
920 */ 1143
1144 /* Fill raw data for each calibration pier */
1145 for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
1146
1147 pcinfo = &chinfo[pier].rf2413_info;
1148
1149 /* Allocate pd_curves for this cal pier */
1150 chinfo[pier].pd_curves =
1151 kcalloc(AR5K_EEPROM_N_PD_CURVES,
1152 sizeof(struct ath5k_pdgain_info),
1153 GFP_KERNEL);
1154
1155 if (!chinfo[pier].pd_curves)
1156 return -ENOMEM;
1157
1158 /* Fill pd_curves */
1159 for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
1160
1161 u8 idx = pdgain_idx[pdg];
1162 struct ath5k_pdgain_info *pd =
1163 &chinfo[pier].pd_curves[idx];
1164
1165 /* One more point for the highest power
1166 * curve (lowest gain) */
1167 if (pdg == ee->ee_pd_gains[mode] - 1)
1168 pd->pd_points = AR5K_EEPROM_N_PD_POINTS;
1169 else
1170 pd->pd_points = AR5K_EEPROM_N_PD_POINTS - 1;
1171
1172 /* Allocate pd points for this curve */
1173 pd->pd_step = kcalloc(pd->pd_points,
1174 sizeof(u8), GFP_KERNEL);
1175
1176 if (!pd->pd_step)
1177 return -ENOMEM;
1178
1179 pd->pd_pwr = kcalloc(pd->pd_points,
1180 sizeof(s16), GFP_KERNEL);
1181
1182 if (!pd->pd_pwr)
1183 return -ENOMEM;
1184
1185 /* Fill raw dataset
1186 * convert all pwr levels to
1187 * quarter dB for RF5112 combatibility */
1188 pd->pd_step[0] = pcinfo->pddac_i[pdg];
1189 pd->pd_pwr[0] = 4 * pcinfo->pwr_i[pdg];
1190
1191 for (point = 1; point < pd->pd_points; point++) {
1192
1193 pd->pd_pwr[point] = pd->pd_pwr[point - 1] +
1194 2 * pcinfo->pwr[pdg][point - 1];
1195
1196 pd->pd_step[point] = pd->pd_step[point - 1] +
1197 pcinfo->pddac[pdg][point - 1];
1198
1199 }
1200
1201 /* Highest gain curve -> min power */
1202 if (pdg == 0)
1203 chinfo[pier].min_pwr = pd->pd_pwr[0];
1204
1205 /* Lowest gain curve -> max power */
1206 if (pdg == ee->ee_pd_gains[mode] - 1)
1207 chinfo[pier].max_pwr =
1208 pd->pd_pwr[pd->pd_points - 1];
1209 }
1210 }
1211
1212 return 0;
1213}
1214
1215/* Parse EEPROM data */
921static int 1216static int
922ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode) 1217ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
923{ 1218{
924 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 1219 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
925 struct ath5k_chan_pcal_info_rf2413 *chan_pcal_info; 1220 struct ath5k_chan_pcal_info_rf2413 *pcinfo;
926 struct ath5k_chan_pcal_info *gen_chan_info; 1221 struct ath5k_chan_pcal_info *chinfo;
927 unsigned int i, c; 1222 u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
928 u32 offset; 1223 u32 offset;
929 int ret; 1224 int idx, i, ret;
930 u16 val; 1225 u16 val;
931 u8 pd_gains = 0; 1226 u8 pd_gains = 0;
932 1227
933 if (ee->ee_x_gain[mode] & 0x1) pd_gains++; 1228 /* Count how many curves we have and
934 if ((ee->ee_x_gain[mode] >> 1) & 0x1) pd_gains++; 1229 * identify them (which one of the 4
935 if ((ee->ee_x_gain[mode] >> 2) & 0x1) pd_gains++; 1230 * available curves we have on each count).
936 if ((ee->ee_x_gain[mode] >> 3) & 0x1) pd_gains++; 1231 * Curves are stored from higher to
1232 * lower gain so we go backwards */
1233 for (idx = AR5K_EEPROM_N_PD_CURVES - 1; idx >= 0; idx--) {
1234 /* ee_x_gain[mode] is x gain mask */
1235 if ((ee->ee_x_gain[mode] >> idx) & 0x1)
1236 pdgain_idx[pd_gains++] = idx;
1237
1238 }
937 ee->ee_pd_gains[mode] = pd_gains; 1239 ee->ee_pd_gains[mode] = pd_gains;
938 1240
1241 if (pd_gains == 0)
1242 return -EINVAL;
1243
939 offset = ath5k_cal_data_offset_2413(ee, mode); 1244 offset = ath5k_cal_data_offset_2413(ee, mode);
940 ee->ee_n_piers[mode] = 0;
941 switch (mode) { 1245 switch (mode) {
942 case AR5K_EEPROM_MODE_11A: 1246 case AR5K_EEPROM_MODE_11A:
943 if (!AR5K_EEPROM_HDR_11A(ee->ee_header)) 1247 if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
@@ -945,7 +1249,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
945 1249
946 ath5k_eeprom_init_11a_pcal_freq(ah, offset); 1250 ath5k_eeprom_init_11a_pcal_freq(ah, offset);
947 offset += AR5K_EEPROM_N_5GHZ_CHAN / 2; 1251 offset += AR5K_EEPROM_N_5GHZ_CHAN / 2;
948 gen_chan_info = ee->ee_pwr_cal_a; 1252 chinfo = ee->ee_pwr_cal_a;
949 break; 1253 break;
950 case AR5K_EEPROM_MODE_11B: 1254 case AR5K_EEPROM_MODE_11B:
951 if (!AR5K_EEPROM_HDR_11B(ee->ee_header)) 1255 if (!AR5K_EEPROM_HDR_11B(ee->ee_header))
@@ -953,7 +1257,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
953 1257
954 ath5k_eeprom_init_11bg_2413(ah, mode, offset); 1258 ath5k_eeprom_init_11bg_2413(ah, mode, offset);
955 offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; 1259 offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
956 gen_chan_info = ee->ee_pwr_cal_b; 1260 chinfo = ee->ee_pwr_cal_b;
957 break; 1261 break;
958 case AR5K_EEPROM_MODE_11G: 1262 case AR5K_EEPROM_MODE_11G:
959 if (!AR5K_EEPROM_HDR_11G(ee->ee_header)) 1263 if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
@@ -961,41 +1265,35 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
961 1265
962 ath5k_eeprom_init_11bg_2413(ah, mode, offset); 1266 ath5k_eeprom_init_11bg_2413(ah, mode, offset);
963 offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; 1267 offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
964 gen_chan_info = ee->ee_pwr_cal_g; 1268 chinfo = ee->ee_pwr_cal_g;
965 break; 1269 break;
966 default: 1270 default:
967 return -EINVAL; 1271 return -EINVAL;
968 } 1272 }
969 1273
970 if (pd_gains == 0)
971 return 0;
972
973 for (i = 0; i < ee->ee_n_piers[mode]; i++) { 1274 for (i = 0; i < ee->ee_n_piers[mode]; i++) {
974 chan_pcal_info = &gen_chan_info[i].rf2413_info; 1275 pcinfo = &chinfo[i].rf2413_info;
975 1276
976 /* 1277 /*
977 * Read pwr_i, pddac_i and the first 1278 * Read pwr_i, pddac_i and the first
978 * 2 pd points (pwr, pddac) 1279 * 2 pd points (pwr, pddac)
979 */ 1280 */
980 AR5K_EEPROM_READ(offset++, val); 1281 AR5K_EEPROM_READ(offset++, val);
981 chan_pcal_info->pwr_i[0] = val & 0x1f; 1282 pcinfo->pwr_i[0] = val & 0x1f;
982 chan_pcal_info->pddac_i[0] = (val >> 5) & 0x7f; 1283 pcinfo->pddac_i[0] = (val >> 5) & 0x7f;
983 chan_pcal_info->pwr[0][0] = 1284 pcinfo->pwr[0][0] = (val >> 12) & 0xf;
984 (val >> 12) & 0xf;
985 1285
986 AR5K_EEPROM_READ(offset++, val); 1286 AR5K_EEPROM_READ(offset++, val);
987 chan_pcal_info->pddac[0][0] = val & 0x3f; 1287 pcinfo->pddac[0][0] = val & 0x3f;
988 chan_pcal_info->pwr[0][1] = (val >> 6) & 0xf; 1288 pcinfo->pwr[0][1] = (val >> 6) & 0xf;
989 chan_pcal_info->pddac[0][1] = 1289 pcinfo->pddac[0][1] = (val >> 10) & 0x3f;
990 (val >> 10) & 0x3f;
991 1290
992 AR5K_EEPROM_READ(offset++, val); 1291 AR5K_EEPROM_READ(offset++, val);
993 chan_pcal_info->pwr[0][2] = val & 0xf; 1292 pcinfo->pwr[0][2] = val & 0xf;
994 chan_pcal_info->pddac[0][2] = 1293 pcinfo->pddac[0][2] = (val >> 4) & 0x3f;
995 (val >> 4) & 0x3f;
996 1294
997 chan_pcal_info->pwr[0][3] = 0; 1295 pcinfo->pwr[0][3] = 0;
998 chan_pcal_info->pddac[0][3] = 0; 1296 pcinfo->pddac[0][3] = 0;
999 1297
1000 if (pd_gains > 1) { 1298 if (pd_gains > 1) {
1001 /* 1299 /*
@@ -1003,44 +1301,36 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
1003 * so it only has 2 pd points. 1301 * so it only has 2 pd points.
1004 * Continue wih pd gain 1. 1302 * Continue wih pd gain 1.
1005 */ 1303 */
1006 chan_pcal_info->pwr_i[1] = (val >> 10) & 0x1f; 1304 pcinfo->pwr_i[1] = (val >> 10) & 0x1f;
1007 1305
1008 chan_pcal_info->pddac_i[1] = (val >> 15) & 0x1; 1306 pcinfo->pddac_i[1] = (val >> 15) & 0x1;
1009 AR5K_EEPROM_READ(offset++, val); 1307 AR5K_EEPROM_READ(offset++, val);
1010 chan_pcal_info->pddac_i[1] |= (val & 0x3F) << 1; 1308 pcinfo->pddac_i[1] |= (val & 0x3F) << 1;
1011 1309
1012 chan_pcal_info->pwr[1][0] = (val >> 6) & 0xf; 1310 pcinfo->pwr[1][0] = (val >> 6) & 0xf;
1013 chan_pcal_info->pddac[1][0] = 1311 pcinfo->pddac[1][0] = (val >> 10) & 0x3f;
1014 (val >> 10) & 0x3f;
1015 1312
1016 AR5K_EEPROM_READ(offset++, val); 1313 AR5K_EEPROM_READ(offset++, val);
1017 chan_pcal_info->pwr[1][1] = val & 0xf; 1314 pcinfo->pwr[1][1] = val & 0xf;
1018 chan_pcal_info->pddac[1][1] = 1315 pcinfo->pddac[1][1] = (val >> 4) & 0x3f;
1019 (val >> 4) & 0x3f; 1316 pcinfo->pwr[1][2] = (val >> 10) & 0xf;
1020 chan_pcal_info->pwr[1][2] = 1317
1021 (val >> 10) & 0xf; 1318 pcinfo->pddac[1][2] = (val >> 14) & 0x3;
1022
1023 chan_pcal_info->pddac[1][2] =
1024 (val >> 14) & 0x3;
1025 AR5K_EEPROM_READ(offset++, val); 1319 AR5K_EEPROM_READ(offset++, val);
1026 chan_pcal_info->pddac[1][2] |= 1320 pcinfo->pddac[1][2] |= (val & 0xF) << 2;
1027 (val & 0xF) << 2;
1028 1321
1029 chan_pcal_info->pwr[1][3] = 0; 1322 pcinfo->pwr[1][3] = 0;
1030 chan_pcal_info->pddac[1][3] = 0; 1323 pcinfo->pddac[1][3] = 0;
1031 } else if (pd_gains == 1) { 1324 } else if (pd_gains == 1) {
1032 /* 1325 /*
1033 * Pd gain 0 is the last one so 1326 * Pd gain 0 is the last one so
1034 * read the extra point. 1327 * read the extra point.
1035 */ 1328 */
1036 chan_pcal_info->pwr[0][3] = 1329 pcinfo->pwr[0][3] = (val >> 10) & 0xf;
1037 (val >> 10) & 0xf;
1038 1330
1039 chan_pcal_info->pddac[0][3] = 1331 pcinfo->pddac[0][3] = (val >> 14) & 0x3;
1040 (val >> 14) & 0x3;
1041 AR5K_EEPROM_READ(offset++, val); 1332 AR5K_EEPROM_READ(offset++, val);
1042 chan_pcal_info->pddac[0][3] |= 1333 pcinfo->pddac[0][3] |= (val & 0xF) << 2;
1043 (val & 0xF) << 2;
1044 } 1334 }
1045 1335
1046 /* 1336 /*
@@ -1048,105 +1338,65 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
1048 * as above. 1338 * as above.
1049 */ 1339 */
1050 if (pd_gains > 2) { 1340 if (pd_gains > 2) {
1051 chan_pcal_info->pwr_i[2] = (val >> 4) & 0x1f; 1341 pcinfo->pwr_i[2] = (val >> 4) & 0x1f;
1052 chan_pcal_info->pddac_i[2] = (val >> 9) & 0x7f; 1342 pcinfo->pddac_i[2] = (val >> 9) & 0x7f;
1053 1343
1054 AR5K_EEPROM_READ(offset++, val); 1344 AR5K_EEPROM_READ(offset++, val);
1055 chan_pcal_info->pwr[2][0] = 1345 pcinfo->pwr[2][0] = (val >> 0) & 0xf;
1056 (val >> 0) & 0xf; 1346 pcinfo->pddac[2][0] = (val >> 4) & 0x3f;
1057 chan_pcal_info->pddac[2][0] = 1347 pcinfo->pwr[2][1] = (val >> 10) & 0xf;
1058 (val >> 4) & 0x3f; 1348
1059 chan_pcal_info->pwr[2][1] = 1349 pcinfo->pddac[2][1] = (val >> 14) & 0x3;
1060 (val >> 10) & 0xf;
1061
1062 chan_pcal_info->pddac[2][1] =
1063 (val >> 14) & 0x3;
1064 AR5K_EEPROM_READ(offset++, val); 1350 AR5K_EEPROM_READ(offset++, val);
1065 chan_pcal_info->pddac[2][1] |= 1351 pcinfo->pddac[2][1] |= (val & 0xF) << 2;
1066 (val & 0xF) << 2;
1067 1352
1068 chan_pcal_info->pwr[2][2] = 1353 pcinfo->pwr[2][2] = (val >> 4) & 0xf;
1069 (val >> 4) & 0xf; 1354 pcinfo->pddac[2][2] = (val >> 8) & 0x3f;
1070 chan_pcal_info->pddac[2][2] =
1071 (val >> 8) & 0x3f;
1072 1355
1073 chan_pcal_info->pwr[2][3] = 0; 1356 pcinfo->pwr[2][3] = 0;
1074 chan_pcal_info->pddac[2][3] = 0; 1357 pcinfo->pddac[2][3] = 0;
1075 } else if (pd_gains == 2) { 1358 } else if (pd_gains == 2) {
1076 chan_pcal_info->pwr[1][3] = 1359 pcinfo->pwr[1][3] = (val >> 4) & 0xf;
1077 (val >> 4) & 0xf; 1360 pcinfo->pddac[1][3] = (val >> 8) & 0x3f;
1078 chan_pcal_info->pddac[1][3] =
1079 (val >> 8) & 0x3f;
1080 } 1361 }
1081 1362
1082 if (pd_gains > 3) { 1363 if (pd_gains > 3) {
1083 chan_pcal_info->pwr_i[3] = (val >> 14) & 0x3; 1364 pcinfo->pwr_i[3] = (val >> 14) & 0x3;
1084 AR5K_EEPROM_READ(offset++, val); 1365 AR5K_EEPROM_READ(offset++, val);
1085 chan_pcal_info->pwr_i[3] |= ((val >> 0) & 0x7) << 2; 1366 pcinfo->pwr_i[3] |= ((val >> 0) & 0x7) << 2;
1086 1367
1087 chan_pcal_info->pddac_i[3] = (val >> 3) & 0x7f; 1368 pcinfo->pddac_i[3] = (val >> 3) & 0x7f;
1088 chan_pcal_info->pwr[3][0] = 1369 pcinfo->pwr[3][0] = (val >> 10) & 0xf;
1089 (val >> 10) & 0xf; 1370 pcinfo->pddac[3][0] = (val >> 14) & 0x3;
1090 chan_pcal_info->pddac[3][0] =
1091 (val >> 14) & 0x3;
1092 1371
1093 AR5K_EEPROM_READ(offset++, val); 1372 AR5K_EEPROM_READ(offset++, val);
1094 chan_pcal_info->pddac[3][0] |= 1373 pcinfo->pddac[3][0] |= (val & 0xF) << 2;
1095 (val & 0xF) << 2; 1374 pcinfo->pwr[3][1] = (val >> 4) & 0xf;
1096 chan_pcal_info->pwr[3][1] = 1375 pcinfo->pddac[3][1] = (val >> 8) & 0x3f;
1097 (val >> 4) & 0xf; 1376
1098 chan_pcal_info->pddac[3][1] = 1377 pcinfo->pwr[3][2] = (val >> 14) & 0x3;
1099 (val >> 8) & 0x3f;
1100
1101 chan_pcal_info->pwr[3][2] =
1102 (val >> 14) & 0x3;
1103 AR5K_EEPROM_READ(offset++, val); 1378 AR5K_EEPROM_READ(offset++, val);
1104 chan_pcal_info->pwr[3][2] |= 1379 pcinfo->pwr[3][2] |= ((val >> 0) & 0x3) << 2;
1105 ((val >> 0) & 0x3) << 2;
1106 1380
1107 chan_pcal_info->pddac[3][2] = 1381 pcinfo->pddac[3][2] = (val >> 2) & 0x3f;
1108 (val >> 2) & 0x3f; 1382 pcinfo->pwr[3][3] = (val >> 8) & 0xf;
1109 chan_pcal_info->pwr[3][3] =
1110 (val >> 8) & 0xf;
1111 1383
1112 chan_pcal_info->pddac[3][3] = 1384 pcinfo->pddac[3][3] = (val >> 12) & 0xF;
1113 (val >> 12) & 0xF;
1114 AR5K_EEPROM_READ(offset++, val); 1385 AR5K_EEPROM_READ(offset++, val);
1115 chan_pcal_info->pddac[3][3] |= 1386 pcinfo->pddac[3][3] |= ((val >> 0) & 0x3) << 4;
1116 ((val >> 0) & 0x3) << 4;
1117 } else if (pd_gains == 3) { 1387 } else if (pd_gains == 3) {
1118 chan_pcal_info->pwr[2][3] = 1388 pcinfo->pwr[2][3] = (val >> 14) & 0x3;
1119 (val >> 14) & 0x3;
1120 AR5K_EEPROM_READ(offset++, val); 1389 AR5K_EEPROM_READ(offset++, val);
1121 chan_pcal_info->pwr[2][3] |= 1390 pcinfo->pwr[2][3] |= ((val >> 0) & 0x3) << 2;
1122 ((val >> 0) & 0x3) << 2;
1123
1124 chan_pcal_info->pddac[2][3] =
1125 (val >> 2) & 0x3f;
1126 }
1127 1391
1128 for (c = 0; c < pd_gains; c++) { 1392 pcinfo->pddac[2][3] = (val >> 2) & 0x3f;
1129 /* Recreate pwr table for this channel using pwr steps */
1130 chan_pcal_info->pwr[c][0] += chan_pcal_info->pwr_i[c] * 2;
1131 chan_pcal_info->pwr[c][1] += chan_pcal_info->pwr[c][0];
1132 chan_pcal_info->pwr[c][2] += chan_pcal_info->pwr[c][1];
1133 chan_pcal_info->pwr[c][3] += chan_pcal_info->pwr[c][2];
1134 if (chan_pcal_info->pwr[c][3] == chan_pcal_info->pwr[c][2])
1135 chan_pcal_info->pwr[c][3] = 0;
1136
1137 /* Recreate pddac table for this channel using pddac steps */
1138 chan_pcal_info->pddac[c][0] += chan_pcal_info->pddac_i[c];
1139 chan_pcal_info->pddac[c][1] += chan_pcal_info->pddac[c][0];
1140 chan_pcal_info->pddac[c][2] += chan_pcal_info->pddac[c][1];
1141 chan_pcal_info->pddac[c][3] += chan_pcal_info->pddac[c][2];
1142 if (chan_pcal_info->pddac[c][3] == chan_pcal_info->pddac[c][2])
1143 chan_pcal_info->pddac[c][3] = 0;
1144 } 1393 }
1145 } 1394 }
1146 1395
1147 return 0; 1396 return ath5k_eeprom_convert_pcal_info_2413(ah, mode, chinfo);
1148} 1397}
1149 1398
1399
1150/* 1400/*
1151 * Read per rate target power (this is the maximum tx power 1401 * Read per rate target power (this is the maximum tx power
1152 * supported by the card). This info is used when setting 1402 * supported by the card). This info is used when setting
@@ -1154,11 +1404,12 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
1154 * 1404 *
1155 * This also works for v5 EEPROMs. 1405 * This also works for v5 EEPROMs.
1156 */ 1406 */
1157static int ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode) 1407static int
1408ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
1158{ 1409{
1159 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 1410 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1160 struct ath5k_rate_pcal_info *rate_pcal_info; 1411 struct ath5k_rate_pcal_info *rate_pcal_info;
1161 u16 *rate_target_pwr_num; 1412 u8 *rate_target_pwr_num;
1162 u32 offset; 1413 u32 offset;
1163 u16 val; 1414 u16 val;
1164 int ret, i; 1415 int ret, i;
@@ -1264,7 +1515,9 @@ ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah)
1264 else 1515 else
1265 read_pcal = ath5k_eeprom_read_pcal_info_5111; 1516 read_pcal = ath5k_eeprom_read_pcal_info_5111;
1266 1517
1267 for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++) { 1518
1519 for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G;
1520 mode++) {
1268 err = read_pcal(ah, mode); 1521 err = read_pcal(ah, mode);
1269 if (err) 1522 if (err)
1270 return err; 1523 return err;
@@ -1277,6 +1530,62 @@ ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah)
1277 return 0; 1530 return 0;
1278} 1531}
1279 1532
1533static int
1534ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
1535{
1536 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1537 struct ath5k_chan_pcal_info *chinfo;
1538 u8 pier, pdg;
1539
1540 switch (mode) {
1541 case AR5K_EEPROM_MODE_11A:
1542 if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
1543 return 0;
1544 chinfo = ee->ee_pwr_cal_a;
1545 break;
1546 case AR5K_EEPROM_MODE_11B:
1547 if (!AR5K_EEPROM_HDR_11B(ee->ee_header))
1548 return 0;
1549 chinfo = ee->ee_pwr_cal_b;
1550 break;
1551 case AR5K_EEPROM_MODE_11G:
1552 if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
1553 return 0;
1554 chinfo = ee->ee_pwr_cal_g;
1555 break;
1556 default:
1557 return -EINVAL;
1558 }
1559
1560 for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
1561 if (!chinfo[pier].pd_curves)
1562 continue;
1563
1564 for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
1565 struct ath5k_pdgain_info *pd =
1566 &chinfo[pier].pd_curves[pdg];
1567
1568 if (pd != NULL) {
1569 kfree(pd->pd_step);
1570 kfree(pd->pd_pwr);
1571 }
1572 }
1573
1574 kfree(chinfo[pier].pd_curves);
1575 }
1576
1577 return 0;
1578}
1579
1580void
1581ath5k_eeprom_detach(struct ath5k_hw *ah)
1582{
1583 u8 mode;
1584
1585 for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++)
1586 ath5k_eeprom_free_pcal_info(ah, mode);
1587}
1588
1280/* Read conformance test limits used for regulatory control */ 1589/* Read conformance test limits used for regulatory control */
1281static int 1590static int
1282ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah) 1591ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
@@ -1457,3 +1766,4 @@ bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah)
1457 else 1766 else
1458 return false; 1767 return false;
1459} 1768}
1769
diff --git a/drivers/net/wireless/ath5k/eeprom.h b/drivers/net/wireless/ath5k/eeprom.h
index 1deebc0257d4..b0c0606dea0b 100644
--- a/drivers/net/wireless/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath5k/eeprom.h
@@ -173,6 +173,7 @@
173#define AR5K_EEPROM_N_5GHZ_CHAN 10 173#define AR5K_EEPROM_N_5GHZ_CHAN 10
174#define AR5K_EEPROM_N_2GHZ_CHAN 3 174#define AR5K_EEPROM_N_2GHZ_CHAN 3
175#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4 175#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4
176#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4
176#define AR5K_EEPROM_MAX_CHAN 10 177#define AR5K_EEPROM_MAX_CHAN 10
177#define AR5K_EEPROM_N_PWR_POINTS_5111 11 178#define AR5K_EEPROM_N_PWR_POINTS_5111 11
178#define AR5K_EEPROM_N_PCDAC 11 179#define AR5K_EEPROM_N_PCDAC 11
@@ -193,7 +194,7 @@
193#define AR5K_EEPROM_SCALE_OC_DELTA(_x) (((_x) * 2) / 10) 194#define AR5K_EEPROM_SCALE_OC_DELTA(_x) (((_x) * 2) / 10)
194#define AR5K_EEPROM_N_CTLS(_v) AR5K_EEPROM_OFF(_v, 16, 32) 195#define AR5K_EEPROM_N_CTLS(_v) AR5K_EEPROM_OFF(_v, 16, 32)
195#define AR5K_EEPROM_MAX_CTLS 32 196#define AR5K_EEPROM_MAX_CTLS 32
196#define AR5K_EEPROM_N_XPD_PER_CHANNEL 4 197#define AR5K_EEPROM_N_PD_CURVES 4
197#define AR5K_EEPROM_N_XPD0_POINTS 4 198#define AR5K_EEPROM_N_XPD0_POINTS 4
198#define AR5K_EEPROM_N_XPD3_POINTS 3 199#define AR5K_EEPROM_N_XPD3_POINTS 3
199#define AR5K_EEPROM_N_PD_GAINS 4 200#define AR5K_EEPROM_N_PD_GAINS 4
@@ -232,7 +233,7 @@ enum ath5k_ctl_mode {
232 AR5K_CTL_11B = 1, 233 AR5K_CTL_11B = 1,
233 AR5K_CTL_11G = 2, 234 AR5K_CTL_11G = 2,
234 AR5K_CTL_TURBO = 3, 235 AR5K_CTL_TURBO = 3,
235 AR5K_CTL_108G = 4, 236 AR5K_CTL_TURBOG = 4,
236 AR5K_CTL_2GHT20 = 5, 237 AR5K_CTL_2GHT20 = 5,
237 AR5K_CTL_5GHT20 = 6, 238 AR5K_CTL_5GHT20 = 6,
238 AR5K_CTL_2GHT40 = 7, 239 AR5K_CTL_2GHT40 = 7,
@@ -240,65 +241,114 @@ enum ath5k_ctl_mode {
240 AR5K_CTL_MODE_M = 15, 241 AR5K_CTL_MODE_M = 15,
241}; 242};
242 243
244/* Default CTL ids for the 3 main reg domains.
245 * Atheros only uses these by default but vendors
246 * can have up to 32 different CTLs for different
247 * scenarios. Note that theese values are ORed with
248 * the mode id (above) so we can have up to 24 CTL
249 * datasets out of these 3 main regdomains. That leaves
250 * 8 ids that can be used by vendors and since 0x20 is
251 * missing from HAL sources i guess this is the set of
252 * custom CTLs vendors can use. */
253#define AR5K_CTL_FCC 0x10
254#define AR5K_CTL_CUSTOM 0x20
255#define AR5K_CTL_ETSI 0x30
256#define AR5K_CTL_MKK 0x40
257
258/* Indicates a CTL with only mode set and
259 * no reg domain mapping, such CTLs are used
260 * for world roaming domains or simply when
261 * a reg domain is not set */
262#define AR5K_CTL_NO_REGDOMAIN 0xf0
263
264/* Indicates an empty (invalid) CTL */
265#define AR5K_CTL_NO_CTL 0xff
266
243/* Per channel calibration data, used for power table setup */ 267/* Per channel calibration data, used for power table setup */
244struct ath5k_chan_pcal_info_rf5111 { 268struct ath5k_chan_pcal_info_rf5111 {
245 /* Power levels in half dbm units 269 /* Power levels in half dbm units
246 * for one power curve. */ 270 * for one power curve. */
247 u8 pwr[AR5K_EEPROM_N_PWR_POINTS_5111]; 271 u8 pwr[AR5K_EEPROM_N_PWR_POINTS_5111];
248 /* PCDAC table steps 272 /* PCDAC table steps
249 * for the above values */ 273 * for the above values */
250 u8 pcdac[AR5K_EEPROM_N_PWR_POINTS_5111]; 274 u8 pcdac[AR5K_EEPROM_N_PWR_POINTS_5111];
251 /* Starting PCDAC step */ 275 /* Starting PCDAC step */
252 u8 pcdac_min; 276 u8 pcdac_min;
253 /* Final PCDAC step */ 277 /* Final PCDAC step */
254 u8 pcdac_max; 278 u8 pcdac_max;
255}; 279};
256 280
257struct ath5k_chan_pcal_info_rf5112 { 281struct ath5k_chan_pcal_info_rf5112 {
258 /* Power levels in quarter dBm units 282 /* Power levels in quarter dBm units
259 * for lower (0) and higher (3) 283 * for lower (0) and higher (3)
260 * level curves */ 284 * level curves in 0.25dB units */
261 s8 pwr_x0[AR5K_EEPROM_N_XPD0_POINTS]; 285 s8 pwr_x0[AR5K_EEPROM_N_XPD0_POINTS];
262 s8 pwr_x3[AR5K_EEPROM_N_XPD3_POINTS]; 286 s8 pwr_x3[AR5K_EEPROM_N_XPD3_POINTS];
263 /* PCDAC table steps 287 /* PCDAC table steps
264 * for the above values */ 288 * for the above values */
265 u8 pcdac_x0[AR5K_EEPROM_N_XPD0_POINTS]; 289 u8 pcdac_x0[AR5K_EEPROM_N_XPD0_POINTS];
266 u8 pcdac_x3[AR5K_EEPROM_N_XPD3_POINTS]; 290 u8 pcdac_x3[AR5K_EEPROM_N_XPD3_POINTS];
267}; 291};
268 292
269struct ath5k_chan_pcal_info_rf2413 { 293struct ath5k_chan_pcal_info_rf2413 {
270 /* Starting pwr/pddac values */ 294 /* Starting pwr/pddac values */
271 s8 pwr_i[AR5K_EEPROM_N_PD_GAINS]; 295 s8 pwr_i[AR5K_EEPROM_N_PD_GAINS];
272 u8 pddac_i[AR5K_EEPROM_N_PD_GAINS]; 296 u8 pddac_i[AR5K_EEPROM_N_PD_GAINS];
273 /* (pwr,pddac) points */ 297 /* (pwr,pddac) points
274 s8 pwr[AR5K_EEPROM_N_PD_GAINS] 298 * power levels in 0.5dB units */
275 [AR5K_EEPROM_N_PD_POINTS]; 299 s8 pwr[AR5K_EEPROM_N_PD_GAINS]
276 u8 pddac[AR5K_EEPROM_N_PD_GAINS] 300 [AR5K_EEPROM_N_PD_POINTS];
277 [AR5K_EEPROM_N_PD_POINTS]; 301 u8 pddac[AR5K_EEPROM_N_PD_GAINS]
302 [AR5K_EEPROM_N_PD_POINTS];
303};
304
305enum ath5k_powertable_type {
306 AR5K_PWRTABLE_PWR_TO_PCDAC = 0,
307 AR5K_PWRTABLE_LINEAR_PCDAC = 1,
308 AR5K_PWRTABLE_PWR_TO_PDADC = 2,
309};
310
311struct ath5k_pdgain_info {
312 u8 pd_points;
313 u8 *pd_step;
314 /* Power values are in
315 * 0.25dB units */
316 s16 *pd_pwr;
278}; 317};
279 318
280struct ath5k_chan_pcal_info { 319struct ath5k_chan_pcal_info {
281 /* Frequency */ 320 /* Frequency */
282 u16 freq; 321 u16 freq;
283 /* Max available power */ 322 /* Tx power boundaries */
284 s8 max_pwr; 323 s16 max_pwr;
324 s16 min_pwr;
285 union { 325 union {
286 struct ath5k_chan_pcal_info_rf5111 rf5111_info; 326 struct ath5k_chan_pcal_info_rf5111 rf5111_info;
287 struct ath5k_chan_pcal_info_rf5112 rf5112_info; 327 struct ath5k_chan_pcal_info_rf5112 rf5112_info;
288 struct ath5k_chan_pcal_info_rf2413 rf2413_info; 328 struct ath5k_chan_pcal_info_rf2413 rf2413_info;
289 }; 329 };
330 /* Raw values used by phy code
331 * Curves are stored in order from lower
332 * gain to higher gain (max txpower -> min txpower) */
333 struct ath5k_pdgain_info *pd_curves;
290}; 334};
291 335
292/* Per rate calibration data for each mode, used for power table setup */ 336/* Per rate calibration data for each mode,
337 * used for rate power table setup.
338 * Note: Values in 0.5dB units */
293struct ath5k_rate_pcal_info { 339struct ath5k_rate_pcal_info {
294 u16 freq; /* Frequency */ 340 u16 freq; /* Frequency */
295 /* Power level for 6-24Mbit/s rates */ 341 /* Power level for 6-24Mbit/s rates or
342 * 1Mb rate */
296 u16 target_power_6to24; 343 u16 target_power_6to24;
297 /* Power level for 36Mbit rate */ 344 /* Power level for 36Mbit rate or
345 * 2Mb rate */
298 u16 target_power_36; 346 u16 target_power_36;
299 /* Power level for 48Mbit rate */ 347 /* Power level for 48Mbit rate or
348 * 5.5Mbit rate */
300 u16 target_power_48; 349 u16 target_power_48;
301 /* Power level for 54Mbit rate */ 350 /* Power level for 54Mbit rate or
351 * 11Mbit rate */
302 u16 target_power_54; 352 u16 target_power_54;
303}; 353};
304 354
@@ -330,12 +380,6 @@ struct ath5k_eeprom_info {
330 u16 ee_cck_ofdm_power_delta; 380 u16 ee_cck_ofdm_power_delta;
331 u16 ee_scaled_cck_delta; 381 u16 ee_scaled_cck_delta;
332 382
333 /* Used for tx thermal adjustment (eeprom_init, rfregs) */
334 u16 ee_tx_clip;
335 u16 ee_pwd_84;
336 u16 ee_pwd_90;
337 u16 ee_gain_select;
338
339 /* RF Calibration settings (reset, rfregs) */ 383 /* RF Calibration settings (reset, rfregs) */
340 u16 ee_i_cal[AR5K_EEPROM_N_MODES]; 384 u16 ee_i_cal[AR5K_EEPROM_N_MODES];
341 u16 ee_q_cal[AR5K_EEPROM_N_MODES]; 385 u16 ee_q_cal[AR5K_EEPROM_N_MODES];
@@ -363,23 +407,25 @@ struct ath5k_eeprom_info {
363 /* Power calibration data */ 407 /* Power calibration data */
364 u16 ee_false_detect[AR5K_EEPROM_N_MODES]; 408 u16 ee_false_detect[AR5K_EEPROM_N_MODES];
365 409
366 /* Number of pd gain curves per mode (RF2413) */ 410 /* Number of pd gain curves per mode */
367 u8 ee_pd_gains[AR5K_EEPROM_N_MODES]; 411 u8 ee_pd_gains[AR5K_EEPROM_N_MODES];
412 /* Back mapping pdcurve number -> pdcurve index in pd->pd_curves */
413 u8 ee_pdc_to_idx[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PD_GAINS];
368 414
369 u8 ee_n_piers[AR5K_EEPROM_N_MODES]; 415 u8 ee_n_piers[AR5K_EEPROM_N_MODES];
370 struct ath5k_chan_pcal_info ee_pwr_cal_a[AR5K_EEPROM_N_5GHZ_CHAN]; 416 struct ath5k_chan_pcal_info ee_pwr_cal_a[AR5K_EEPROM_N_5GHZ_CHAN];
371 struct ath5k_chan_pcal_info ee_pwr_cal_b[AR5K_EEPROM_N_2GHZ_CHAN]; 417 struct ath5k_chan_pcal_info ee_pwr_cal_b[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
372 struct ath5k_chan_pcal_info ee_pwr_cal_g[AR5K_EEPROM_N_2GHZ_CHAN]; 418 struct ath5k_chan_pcal_info ee_pwr_cal_g[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
373 419
374 /* Per rate target power levels */ 420 /* Per rate target power levels */
375 u16 ee_rate_target_pwr_num[AR5K_EEPROM_N_MODES]; 421 u8 ee_rate_target_pwr_num[AR5K_EEPROM_N_MODES];
376 struct ath5k_rate_pcal_info ee_rate_tpwr_a[AR5K_EEPROM_N_5GHZ_CHAN]; 422 struct ath5k_rate_pcal_info ee_rate_tpwr_a[AR5K_EEPROM_N_5GHZ_CHAN];
377 struct ath5k_rate_pcal_info ee_rate_tpwr_b[AR5K_EEPROM_N_2GHZ_CHAN]; 423 struct ath5k_rate_pcal_info ee_rate_tpwr_b[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
378 struct ath5k_rate_pcal_info ee_rate_tpwr_g[AR5K_EEPROM_N_2GHZ_CHAN]; 424 struct ath5k_rate_pcal_info ee_rate_tpwr_g[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
379 425
380 /* Conformance test limits (Unused) */ 426 /* Conformance test limits (Unused) */
381 u16 ee_ctls; 427 u8 ee_ctls;
382 u16 ee_ctl[AR5K_EEPROM_MAX_CTLS]; 428 u8 ee_ctl[AR5K_EEPROM_MAX_CTLS];
383 struct ath5k_edge_power ee_ctl_pwr[AR5K_EEPROM_N_EDGES * AR5K_EEPROM_MAX_CTLS]; 429 struct ath5k_edge_power ee_ctl_pwr[AR5K_EEPROM_N_EDGES * AR5K_EEPROM_MAX_CTLS];
384 430
385 /* Noise Floor Calibration settings */ 431 /* Noise Floor Calibration settings */
diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath5k/initvals.c
index 44886434187b..61fb621ed20d 100644
--- a/drivers/net/wireless/ath5k/initvals.c
+++ b/drivers/net/wireless/ath5k/initvals.c
@@ -1510,8 +1510,8 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
1510 rf2425_ini_mode_end, mode); 1510 rf2425_ini_mode_end, mode);
1511 1511
1512 ath5k_hw_ini_registers(ah, 1512 ath5k_hw_ini_registers(ah,
1513 ARRAY_SIZE(rf2413_ini_common_end), 1513 ARRAY_SIZE(rf2425_ini_common_end),
1514 rf2413_ini_common_end, change_channel); 1514 rf2425_ini_common_end, change_channel);
1515 1515
1516 ath5k_hw_ini_registers(ah, 1516 ath5k_hw_ini_registers(ah,
1517 ARRAY_SIZE(rf5112_ini_bbgain), 1517 ARRAY_SIZE(rf5112_ini_bbgain),
diff --git a/drivers/net/wireless/ath5k/led.c b/drivers/net/wireless/ath5k/led.c
index 0686e12738b3..19555fb79c9b 100644
--- a/drivers/net/wireless/ath5k/led.c
+++ b/drivers/net/wireless/ath5k/led.c
@@ -65,6 +65,8 @@ static const struct pci_device_id ath5k_led_devices[] = {
65 { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) }, 65 { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) },
66 /* E-machines E510 (tuliom@gmail.com) */ 66 /* E-machines E510 (tuliom@gmail.com) */
67 { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0428), ATH_LED(3, 0) }, 67 { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0428), ATH_LED(3, 0) },
68 /* Acer Extensa 5620z (nekoreeve@gmail.com) */
69 { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0105), ATH_LED(3, 0) },
68 { } 70 { }
69}; 71};
70 72
diff --git a/drivers/net/wireless/ath5k/phy.c b/drivers/net/wireless/ath5k/phy.c
index 81f5bebc48b1..9e2faae5ae94 100644
--- a/drivers/net/wireless/ath5k/phy.c
+++ b/drivers/net/wireless/ath5k/phy.c
@@ -4,6 +4,7 @@
4 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org> 4 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com> 5 * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
6 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com> 6 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
7 * Copyright (c) 2008-2009 Felix Fietkau <nbd@openwrt.org>
7 * 8 *
8 * Permission to use, copy, modify, and distribute this software for any 9 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above 10 * purpose with or without fee is hereby granted, provided that the above
@@ -183,7 +184,9 @@ static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
183 if (ah->ah_gain.g_state != AR5K_RFGAIN_ACTIVE) 184 if (ah->ah_gain.g_state != AR5K_RFGAIN_ACTIVE)
184 return; 185 return;
185 186
186 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_max, 187 /* Send the packet with 2dB below max power as
188 * patent doc suggest */
189 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_max_pwr - 4,
187 AR5K_PHY_PAPD_PROBE_TXPOWER) | 190 AR5K_PHY_PAPD_PROBE_TXPOWER) |
188 AR5K_PHY_PAPD_PROBE_TX_NEXT, AR5K_PHY_PAPD_PROBE); 191 AR5K_PHY_PAPD_PROBE_TX_NEXT, AR5K_PHY_PAPD_PROBE);
189 192
@@ -1433,93 +1436,1120 @@ unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah)
1433 return false; /*XXX: What do we return for 5210 ?*/ 1436 return false; /*XXX: What do we return for 5210 ?*/
1434} 1437}
1435 1438
1439
1440/****************\
1441* TX power setup *
1442\****************/
1443
1444/*
1445 * Helper functions
1446 */
1447
1448/*
1449 * Do linear interpolation between two given (x, y) points
1450 */
1451static s16
1452ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
1453 s16 y_left, s16 y_right)
1454{
1455 s16 ratio, result;
1456
1457 /* Avoid divide by zero and skip interpolation
1458 * if we have the same point */
1459 if ((x_left == x_right) || (y_left == y_right))
1460 return y_left;
1461
1462 /*
1463 * Since we use ints and not fps, we need to scale up in
1464 * order to get a sane ratio value (or else we 'll eg. get
1465 * always 1 instead of 1.25, 1.75 etc). We scale up by 100
1466 * to have some accuracy both for 0.5 and 0.25 steps.
1467 */
1468 ratio = ((100 * y_right - 100 * y_left)/(x_right - x_left));
1469
1470 /* Now scale down to be in range */
1471 result = y_left + (ratio * (target - x_left) / 100);
1472
1473 return result;
1474}
1475
1476/*
1477 * Find vertical boundary (min pwr) for the linear PCDAC curve.
1478 *
1479 * Since we have the top of the curve and we draw the line below
1480 * until we reach 1 (1 pcdac step) we need to know which point
1481 * (x value) that is so that we don't go below y axis and have negative
1482 * pcdac values when creating the curve, or fill the table with zeroes.
1483 */
1484static s16
1485ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
1486 const s16 *pwrL, const s16 *pwrR)
1487{
1488 s8 tmp;
1489 s16 min_pwrL, min_pwrR;
1490 s16 pwr_i = pwrL[0];
1491
1492 do {
1493 pwr_i--;
1494 tmp = (s8) ath5k_get_interpolated_value(pwr_i,
1495 pwrL[0], pwrL[1],
1496 stepL[0], stepL[1]);
1497
1498 } while (tmp > 1);
1499
1500 min_pwrL = pwr_i;
1501
1502 pwr_i = pwrR[0];
1503 do {
1504 pwr_i--;
1505 tmp = (s8) ath5k_get_interpolated_value(pwr_i,
1506 pwrR[0], pwrR[1],
1507 stepR[0], stepR[1]);
1508
1509 } while (tmp > 1);
1510
1511 min_pwrR = pwr_i;
1512
1513 /* Keep the right boundary so that it works for both curves */
1514 return max(min_pwrL, min_pwrR);
1515}
1516
1517/*
1518 * Interpolate (pwr,vpd) points to create a Power to PDADC or a
1519 * Power to PCDAC curve.
1520 *
1521 * Each curve has power on x axis (in 0.5dB units) and PCDAC/PDADC
1522 * steps (offsets) on y axis. Power can go up to 31.5dB and max
1523 * PCDAC/PDADC step for each curve is 64 but we can write more than
1524 * one curves on hw so we can go up to 128 (which is the max step we
1525 * can write on the final table).
1526 *
1527 * We write y values (PCDAC/PDADC steps) on hw.
1528 */
1529static void
1530ath5k_create_power_curve(s16 pmin, s16 pmax,
1531 const s16 *pwr, const u8 *vpd,
1532 u8 num_points,
1533 u8 *vpd_table, u8 type)
1534{
1535 u8 idx[2] = { 0, 1 };
1536 s16 pwr_i = 2*pmin;
1537 int i;
1538
1539 if (num_points < 2)
1540 return;
1541
1542 /* We want the whole line, so adjust boundaries
1543 * to cover the entire power range. Note that
1544 * power values are already 0.25dB so no need
1545 * to multiply pwr_i by 2 */
1546 if (type == AR5K_PWRTABLE_LINEAR_PCDAC) {
1547 pwr_i = pmin;
1548 pmin = 0;
1549 pmax = 63;
1550 }
1551
1552 /* Find surrounding turning points (TPs)
1553 * and interpolate between them */
1554 for (i = 0; (i <= (u16) (pmax - pmin)) &&
1555 (i < AR5K_EEPROM_POWER_TABLE_SIZE); i++) {
1556
1557 /* We passed the right TP, move to the next set of TPs
1558 * if we pass the last TP, extrapolate above using the last
1559 * two TPs for ratio */
1560 if ((pwr_i > pwr[idx[1]]) && (idx[1] < num_points - 1)) {
1561 idx[0]++;
1562 idx[1]++;
1563 }
1564
1565 vpd_table[i] = (u8) ath5k_get_interpolated_value(pwr_i,
1566 pwr[idx[0]], pwr[idx[1]],
1567 vpd[idx[0]], vpd[idx[1]]);
1568
1569 /* Increase by 0.5dB
1570 * (0.25 dB units) */
1571 pwr_i += 2;
1572 }
1573}
1574
1575/*
1576 * Get the surrounding per-channel power calibration piers
1577 * for a given frequency so that we can interpolate between
1578 * them and come up with an apropriate dataset for our current
1579 * channel.
1580 */
1581static void
1582ath5k_get_chan_pcal_surrounding_piers(struct ath5k_hw *ah,
1583 struct ieee80211_channel *channel,
1584 struct ath5k_chan_pcal_info **pcinfo_l,
1585 struct ath5k_chan_pcal_info **pcinfo_r)
1586{
1587 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1588 struct ath5k_chan_pcal_info *pcinfo;
1589 u8 idx_l, idx_r;
1590 u8 mode, max, i;
1591 u32 target = channel->center_freq;
1592
1593 idx_l = 0;
1594 idx_r = 0;
1595
1596 if (!(channel->hw_value & CHANNEL_OFDM)) {
1597 pcinfo = ee->ee_pwr_cal_b;
1598 mode = AR5K_EEPROM_MODE_11B;
1599 } else if (channel->hw_value & CHANNEL_2GHZ) {
1600 pcinfo = ee->ee_pwr_cal_g;
1601 mode = AR5K_EEPROM_MODE_11G;
1602 } else {
1603 pcinfo = ee->ee_pwr_cal_a;
1604 mode = AR5K_EEPROM_MODE_11A;
1605 }
1606 max = ee->ee_n_piers[mode] - 1;
1607
1608 /* Frequency is below our calibrated
1609 * range. Use the lowest power curve
1610 * we have */
1611 if (target < pcinfo[0].freq) {
1612 idx_l = idx_r = 0;
1613 goto done;
1614 }
1615
1616 /* Frequency is above our calibrated
1617 * range. Use the highest power curve
1618 * we have */
1619 if (target > pcinfo[max].freq) {
1620 idx_l = idx_r = max;
1621 goto done;
1622 }
1623
1624 /* Frequency is inside our calibrated
1625 * channel range. Pick the surrounding
1626 * calibration piers so that we can
1627 * interpolate */
1628 for (i = 0; i <= max; i++) {
1629
1630 /* Frequency matches one of our calibration
1631 * piers, no need to interpolate, just use
1632 * that calibration pier */
1633 if (pcinfo[i].freq == target) {
1634 idx_l = idx_r = i;
1635 goto done;
1636 }
1637
1638 /* We found a calibration pier that's above
1639 * frequency, use this pier and the previous
1640 * one to interpolate */
1641 if (target < pcinfo[i].freq) {
1642 idx_r = i;
1643 idx_l = idx_r - 1;
1644 goto done;
1645 }
1646 }
1647
1648done:
1649 *pcinfo_l = &pcinfo[idx_l];
1650 *pcinfo_r = &pcinfo[idx_r];
1651
1652 return;
1653}
1654
1655/*
1656 * Get the surrounding per-rate power calibration data
1657 * for a given frequency and interpolate between power
1658 * values to set max target power supported by hw for
1659 * each rate.
1660 */
1661static void
1662ath5k_get_rate_pcal_data(struct ath5k_hw *ah,
1663 struct ieee80211_channel *channel,
1664 struct ath5k_rate_pcal_info *rates)
1665{
1666 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1667 struct ath5k_rate_pcal_info *rpinfo;
1668 u8 idx_l, idx_r;
1669 u8 mode, max, i;
1670 u32 target = channel->center_freq;
1671
1672 idx_l = 0;
1673 idx_r = 0;
1674
1675 if (!(channel->hw_value & CHANNEL_OFDM)) {
1676 rpinfo = ee->ee_rate_tpwr_b;
1677 mode = AR5K_EEPROM_MODE_11B;
1678 } else if (channel->hw_value & CHANNEL_2GHZ) {
1679 rpinfo = ee->ee_rate_tpwr_g;
1680 mode = AR5K_EEPROM_MODE_11G;
1681 } else {
1682 rpinfo = ee->ee_rate_tpwr_a;
1683 mode = AR5K_EEPROM_MODE_11A;
1684 }
1685 max = ee->ee_rate_target_pwr_num[mode] - 1;
1686
1687 /* Get the surrounding calibration
1688 * piers - same as above */
1689 if (target < rpinfo[0].freq) {
1690 idx_l = idx_r = 0;
1691 goto done;
1692 }
1693
1694 if (target > rpinfo[max].freq) {
1695 idx_l = idx_r = max;
1696 goto done;
1697 }
1698
1699 for (i = 0; i <= max; i++) {
1700
1701 if (rpinfo[i].freq == target) {
1702 idx_l = idx_r = i;
1703 goto done;
1704 }
1705
1706 if (target < rpinfo[i].freq) {
1707 idx_r = i;
1708 idx_l = idx_r - 1;
1709 goto done;
1710 }
1711 }
1712
1713done:
1714 /* Now interpolate power value, based on the frequency */
1715 rates->freq = target;
1716
1717 rates->target_power_6to24 =
1718 ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
1719 rpinfo[idx_r].freq,
1720 rpinfo[idx_l].target_power_6to24,
1721 rpinfo[idx_r].target_power_6to24);
1722
1723 rates->target_power_36 =
1724 ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
1725 rpinfo[idx_r].freq,
1726 rpinfo[idx_l].target_power_36,
1727 rpinfo[idx_r].target_power_36);
1728
1729 rates->target_power_48 =
1730 ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
1731 rpinfo[idx_r].freq,
1732 rpinfo[idx_l].target_power_48,
1733 rpinfo[idx_r].target_power_48);
1734
1735 rates->target_power_54 =
1736 ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
1737 rpinfo[idx_r].freq,
1738 rpinfo[idx_l].target_power_54,
1739 rpinfo[idx_r].target_power_54);
1740}
1741
1742/*
1743 * Get the max edge power for this channel if
1744 * we have such data from EEPROM's Conformance Test
1745 * Limits (CTL), and limit max power if needed.
1746 *
1747 * FIXME: Only works for world regulatory domains
1748 */
1749static void
1750ath5k_get_max_ctl_power(struct ath5k_hw *ah,
1751 struct ieee80211_channel *channel)
1752{
1753 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1754 struct ath5k_edge_power *rep = ee->ee_ctl_pwr;
1755 u8 *ctl_val = ee->ee_ctl;
1756 s16 max_chan_pwr = ah->ah_txpower.txp_max_pwr / 4;
1757 s16 edge_pwr = 0;
1758 u8 rep_idx;
1759 u8 i, ctl_mode;
1760 u8 ctl_idx = 0xFF;
1761 u32 target = channel->center_freq;
1762
1763 /* Find out a CTL for our mode that's not mapped
1764 * on a specific reg domain.
1765 *
1766 * TODO: Map our current reg domain to one of the 3 available
1767 * reg domain ids so that we can support more CTLs. */
1768 switch (channel->hw_value & CHANNEL_MODES) {
1769 case CHANNEL_A:
1770 ctl_mode = AR5K_CTL_11A | AR5K_CTL_NO_REGDOMAIN;
1771 break;
1772 case CHANNEL_G:
1773 ctl_mode = AR5K_CTL_11G | AR5K_CTL_NO_REGDOMAIN;
1774 break;
1775 case CHANNEL_B:
1776 ctl_mode = AR5K_CTL_11B | AR5K_CTL_NO_REGDOMAIN;
1777 break;
1778 case CHANNEL_T:
1779 ctl_mode = AR5K_CTL_TURBO | AR5K_CTL_NO_REGDOMAIN;
1780 break;
1781 case CHANNEL_TG:
1782 ctl_mode = AR5K_CTL_TURBOG | AR5K_CTL_NO_REGDOMAIN;
1783 break;
1784 case CHANNEL_XR:
1785 /* Fall through */
1786 default:
1787 return;
1788 }
1789
1790 for (i = 0; i < ee->ee_ctls; i++) {
1791 if (ctl_val[i] == ctl_mode) {
1792 ctl_idx = i;
1793 break;
1794 }
1795 }
1796
1797 /* If we have a CTL dataset available grab it and find the
1798 * edge power for our frequency */
1799 if (ctl_idx == 0xFF)
1800 return;
1801
1802 /* Edge powers are sorted by frequency from lower
1803 * to higher. Each CTL corresponds to 8 edge power
1804 * measurements. */
1805 rep_idx = ctl_idx * AR5K_EEPROM_N_EDGES;
1806
1807 /* Don't do boundaries check because we
1808 * might have more that one bands defined
1809 * for this mode */
1810
1811 /* Get the edge power that's closer to our
1812 * frequency */
1813 for (i = 0; i < AR5K_EEPROM_N_EDGES; i++) {
1814 rep_idx += i;
1815 if (target <= rep[rep_idx].freq)
1816 edge_pwr = (s16) rep[rep_idx].edge;
1817 }
1818
1819 if (edge_pwr)
1820 ah->ah_txpower.txp_max_pwr = 4*min(edge_pwr, max_chan_pwr);
1821}
1822
1823
1824/*
1825 * Power to PCDAC table functions
1826 */
1827
/*
 * Fill Power to PCDAC table on RF5111
 *
 * No further processing is needed for RF5111, the only thing we have to
 * do is fill the values below and above calibration range since eeprom data
 * may not cover the entire PCDAC table.
 */
1835static void
1836ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min,
1837 s16 *table_max)
1838{
1839 u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
1840 u8 *pcdac_tmp = ah->ah_txpower.tmpL[0];
1841 u8 pcdac_0, pcdac_n, pcdac_i, pwr_idx, i;
1842 s16 min_pwr, max_pwr;
1843
1844 /* Get table boundaries */
1845 min_pwr = table_min[0];
1846 pcdac_0 = pcdac_tmp[0];
1847
1848 max_pwr = table_max[0];
1849 pcdac_n = pcdac_tmp[table_max[0] - table_min[0]];
1850
1851 /* Extrapolate below minimum using pcdac_0 */
1852 pcdac_i = 0;
1853 for (i = 0; i < min_pwr; i++)
1854 pcdac_out[pcdac_i++] = pcdac_0;
1855
1856 /* Copy values from pcdac_tmp */
1857 pwr_idx = min_pwr;
1858 for (i = 0 ; pwr_idx <= max_pwr &&
1859 pcdac_i < AR5K_EEPROM_POWER_TABLE_SIZE; i++) {
1860 pcdac_out[pcdac_i++] = pcdac_tmp[i];
1861 pwr_idx++;
1862 }
1863
1864 /* Extrapolate above maximum */
1865 while (pcdac_i < AR5K_EEPROM_POWER_TABLE_SIZE)
1866 pcdac_out[pcdac_i++] = pcdac_n;
1867
1868}
1439 1869
/*
 * Combine available XPD Curves and fill Linear Power to PCDAC table
 * on RF5112
 *
 * RF5112 can have up to 2 curves (one for low txpower range and one for
 * higher txpower range). We need to put them both on pcdac_out and place
 * them in the correct location. In case we only have one curve available
 * just fit it on pcdac_out (it's supposed to cover the entire range of
 * available pwr levels since it's always the higher power curve). Extrapolate
 * below and above final table if needed.
 */
1443static void ath5k_txpower_table(struct ath5k_hw *ah, 1881static void
1444 struct ieee80211_channel *channel, s16 max_power) 1882ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
1883 s16 *table_max, u8 pdcurves)
1445{ 1884{
1446 unsigned int i, min, max, n; 1885 u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
1447 u16 txpower, *rates; 1886 u8 *pcdac_low_pwr;
1448 1887 u8 *pcdac_high_pwr;
1449 rates = ah->ah_txpower.txp_rates; 1888 u8 *pcdac_tmp;
1450 1889 u8 pwr;
1451 txpower = AR5K_TUNE_DEFAULT_TXPOWER * 2; 1890 s16 max_pwr_idx;
1452 if (max_power > txpower) 1891 s16 min_pwr_idx;
1453 txpower = max_power > AR5K_TUNE_MAX_TXPOWER ? 1892 s16 mid_pwr_idx = 0;
1454 AR5K_TUNE_MAX_TXPOWER : max_power; 1893 /* Edge flag turs on the 7nth bit on the PCDAC
1455 1894 * to delcare the higher power curve (force values
1456 for (i = 0; i < AR5K_MAX_RATES; i++) 1895 * to be greater than 64). If we only have one curve
1457 rates[i] = txpower; 1896 * we don't need to set this, if we have 2 curves and
1458 1897 * fill the table backwards this can also be used to
1459 /* XXX setup target powers by rate */ 1898 * switch from higher power curve to lower power curve */
1460 1899 u8 edge_flag;
1461 ah->ah_txpower.txp_min = rates[7]; 1900 int i;
1462 ah->ah_txpower.txp_max = rates[0]; 1901
1463 ah->ah_txpower.txp_ofdm = rates[0]; 1902 /* When we have only one curve available
1464 1903 * that's the higher power curve. If we have
1465 /* Calculate the power table */ 1904 * two curves the first is the high power curve
1466 n = ARRAY_SIZE(ah->ah_txpower.txp_pcdac); 1905 * and the next is the low power curve. */
1467 min = AR5K_EEPROM_PCDAC_START; 1906 if (pdcurves > 1) {
1468 max = AR5K_EEPROM_PCDAC_STOP; 1907 pcdac_low_pwr = ah->ah_txpower.tmpL[1];
1469 for (i = 0; i < n; i += AR5K_EEPROM_PCDAC_STEP) 1908 pcdac_high_pwr = ah->ah_txpower.tmpL[0];
1470 ah->ah_txpower.txp_pcdac[i] = 1909 mid_pwr_idx = table_max[1] - table_min[1] - 1;
1471#ifdef notyet 1910 max_pwr_idx = (table_max[0] - table_min[0]) / 2;
1472 min + ((i * (max - min)) / n); 1911
1473#else 1912 /* If table size goes beyond 31.5dB, keep the
1474 min; 1913 * upper 31.5dB range when setting tx power.
1914 * Note: 126 = 31.5 dB in quarter dB steps */
1915 if (table_max[0] - table_min[1] > 126)
1916 min_pwr_idx = table_max[0] - 126;
1917 else
1918 min_pwr_idx = table_min[1];
1919
1920 /* Since we fill table backwards
1921 * start from high power curve */
1922 pcdac_tmp = pcdac_high_pwr;
1923
1924 edge_flag = 0x40;
1925#if 0
1926 /* If both min and max power limits are in lower
1927 * power curve's range, only use the low power curve.
1928 * TODO: min/max levels are related to target
1929 * power values requested from driver/user
1930 * XXX: Is this really needed ? */
1931 if (min_pwr < table_max[1] &&
1932 max_pwr < table_max[1]) {
1933 edge_flag = 0;
1934 pcdac_tmp = pcdac_low_pwr;
1935 max_pwr_idx = (table_max[1] - table_min[1])/2;
1936 }
1475#endif 1937#endif
1938 } else {
1939 pcdac_low_pwr = ah->ah_txpower.tmpL[1]; /* Zeroed */
1940 pcdac_high_pwr = ah->ah_txpower.tmpL[0];
1941 min_pwr_idx = table_min[0];
1942 max_pwr_idx = (table_max[0] - table_min[0]) / 2;
1943 pcdac_tmp = pcdac_high_pwr;
1944 edge_flag = 0;
1945 }
1946
1947 /* This is used when setting tx power*/
1948 ah->ah_txpower.txp_min_idx = min_pwr_idx/2;
1949
1950 /* Fill Power to PCDAC table backwards */
1951 pwr = max_pwr_idx;
1952 for (i = 63; i >= 0; i--) {
1953 /* Entering lower power range, reset
1954 * edge flag and set pcdac_tmp to lower
1955 * power curve.*/
1956 if (edge_flag == 0x40 &&
1957 (2*pwr <= (table_max[1] - table_min[0]) || pwr == 0)) {
1958 edge_flag = 0x00;
1959 pcdac_tmp = pcdac_low_pwr;
1960 pwr = mid_pwr_idx/2;
1961 }
1962
1963 /* Don't go below 1, extrapolate below if we have
1964 * already swithced to the lower power curve -or
1965 * we only have one curve and edge_flag is zero
1966 * anyway */
1967 if (pcdac_tmp[pwr] < 1 && (edge_flag == 0x00)) {
1968 while (i >= 0) {
1969 pcdac_out[i] = pcdac_out[i + 1];
1970 i--;
1971 }
1972 break;
1973 }
1974
1975 pcdac_out[i] = pcdac_tmp[pwr] | edge_flag;
1976
1977 /* Extrapolate above if pcdac is greater than
1978 * 126 -this can happen because we OR pcdac_out
1979 * value with edge_flag on high power curve */
1980 if (pcdac_out[i] > 126)
1981 pcdac_out[i] = 126;
1982
1983 /* Decrease by a 0.5dB step */
1984 pwr--;
1985 }
1476} 1986}
1477 1987
/* Write PCDAC values on hw */
static void
ath5k_setup_pcdac_table(struct ath5k_hw *ah)
{
	u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
	int i;

	/*
	 * Write TX power values
	 *
	 * Two PCDAC entries are packed per 32bit register
	 * (one per 16bit half): each entry sits in bits 8-15
	 * of its half with the low byte forced to 0xff.
	 */
	for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) {
		ath5k_hw_reg_write(ah,
			(((pcdac_out[2*i + 0] << 8 | 0xff) & 0xffff) << 0) |
			(((pcdac_out[2*i + 1] << 8 | 0xff) & 0xffff) << 16),
			AR5K_PHY_PCDAC_TXPOWER(i));
	}
}
2005
2006
/*
 * Power to PDADC table functions
 */

/*
 * Set the gain boundaries and create final Power to PDADC table
 *
 * We can have up to 4 pd curves; we need to do a similar process
 * as we do for RF5112. This time we don't have an edge_flag but we
 * set the gain boundaries on a separate register.
 */
2018static void
2019ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
2020 s16 *pwr_min, s16 *pwr_max, u8 pdcurves)
1484{ 2021{
1485 bool tpc = ah->ah_txpower.txp_tpc; 2022 u8 gain_boundaries[AR5K_EEPROM_N_PD_GAINS];
1486 unsigned int i; 2023 u8 *pdadc_out = ah->ah_txpower.txp_pd_table;
2024 u8 *pdadc_tmp;
2025 s16 pdadc_0;
2026 u8 pdadc_i, pdadc_n, pwr_step, pdg, max_idx, table_size;
2027 u8 pd_gain_overlap;
2028
2029 /* Note: Register value is initialized on initvals
2030 * there is no feedback from hw.
2031 * XXX: What about pd_gain_overlap from EEPROM ? */
2032 pd_gain_overlap = (u8) ath5k_hw_reg_read(ah, AR5K_PHY_TPC_RG5) &
2033 AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP;
2034
2035 /* Create final PDADC table */
2036 for (pdg = 0, pdadc_i = 0; pdg < pdcurves; pdg++) {
2037 pdadc_tmp = ah->ah_txpower.tmpL[pdg];
2038
2039 if (pdg == pdcurves - 1)
2040 /* 2 dB boundary stretch for last
2041 * (higher power) curve */
2042 gain_boundaries[pdg] = pwr_max[pdg] + 4;
2043 else
2044 /* Set gain boundary in the middle
2045 * between this curve and the next one */
2046 gain_boundaries[pdg] =
2047 (pwr_max[pdg] + pwr_min[pdg + 1]) / 2;
2048
2049 /* Sanity check in case our 2 db stretch got out of
2050 * range. */
2051 if (gain_boundaries[pdg] > AR5K_TUNE_MAX_TXPOWER)
2052 gain_boundaries[pdg] = AR5K_TUNE_MAX_TXPOWER;
2053
2054 /* For the first curve (lower power)
2055 * start from 0 dB */
2056 if (pdg == 0)
2057 pdadc_0 = 0;
2058 else
2059 /* For the other curves use the gain overlap */
2060 pdadc_0 = (gain_boundaries[pdg - 1] - pwr_min[pdg]) -
2061 pd_gain_overlap;
1487 2062
1488 ATH5K_TRACE(ah->ah_sc); 2063 /* Force each power step to be at least 0.5 dB */
1489 if (txpower > AR5K_TUNE_MAX_TXPOWER) { 2064 if ((pdadc_tmp[1] - pdadc_tmp[0]) > 1)
1490 ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower); 2065 pwr_step = pdadc_tmp[1] - pdadc_tmp[0];
1491 return -EINVAL; 2066 else
2067 pwr_step = 1;
2068
2069 /* If pdadc_0 is negative, we need to extrapolate
2070 * below this pdgain by a number of pwr_steps */
2071 while ((pdadc_0 < 0) && (pdadc_i < 128)) {
2072 s16 tmp = pdadc_tmp[0] + pdadc_0 * pwr_step;
2073 pdadc_out[pdadc_i++] = (tmp < 0) ? 0 : (u8) tmp;
2074 pdadc_0++;
2075 }
2076
2077 /* Set last pwr level, using gain boundaries */
2078 pdadc_n = gain_boundaries[pdg] + pd_gain_overlap - pwr_min[pdg];
2079 /* Limit it to be inside pwr range */
2080 table_size = pwr_max[pdg] - pwr_min[pdg];
2081 max_idx = (pdadc_n < table_size) ? pdadc_n : table_size;
2082
2083 /* Fill pdadc_out table */
2084 while (pdadc_0 < max_idx)
2085 pdadc_out[pdadc_i++] = pdadc_tmp[pdadc_0++];
2086
2087 /* Need to extrapolate above this pdgain? */
2088 if (pdadc_n <= max_idx)
2089 continue;
2090
2091 /* Force each power step to be at least 0.5 dB */
2092 if ((pdadc_tmp[table_size - 1] - pdadc_tmp[table_size - 2]) > 1)
2093 pwr_step = pdadc_tmp[table_size - 1] -
2094 pdadc_tmp[table_size - 2];
2095 else
2096 pwr_step = 1;
2097
2098 /* Extrapolate above */
2099 while ((pdadc_0 < (s16) pdadc_n) &&
2100 (pdadc_i < AR5K_EEPROM_POWER_TABLE_SIZE * 2)) {
2101 s16 tmp = pdadc_tmp[table_size - 1] +
2102 (pdadc_0 - max_idx) * pwr_step;
2103 pdadc_out[pdadc_i++] = (tmp > 127) ? 127 : (u8) tmp;
2104 pdadc_0++;
2105 }
1492 } 2106 }
1493 2107
2108 while (pdg < AR5K_EEPROM_N_PD_GAINS) {
2109 gain_boundaries[pdg] = gain_boundaries[pdg - 1];
2110 pdg++;
2111 }
2112
2113 while (pdadc_i < AR5K_EEPROM_POWER_TABLE_SIZE * 2) {
2114 pdadc_out[pdadc_i] = pdadc_out[pdadc_i - 1];
2115 pdadc_i++;
2116 }
2117
2118 /* Set gain boundaries */
2119 ath5k_hw_reg_write(ah,
2120 AR5K_REG_SM(pd_gain_overlap,
2121 AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP) |
2122 AR5K_REG_SM(gain_boundaries[0],
2123 AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_1) |
2124 AR5K_REG_SM(gain_boundaries[1],
2125 AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_2) |
2126 AR5K_REG_SM(gain_boundaries[2],
2127 AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3) |
2128 AR5K_REG_SM(gain_boundaries[3],
2129 AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4),
2130 AR5K_PHY_TPC_RG5);
2131
2132 /* Used for setting rate power table */
2133 ah->ah_txpower.txp_min_idx = pwr_min[0];
2134
2135}
2136
2137/* Write PDADC values on hw */
2138static void
2139ath5k_setup_pwr_to_pdadc_table(struct ath5k_hw *ah,
2140 u8 pdcurves, u8 *pdg_to_idx)
2141{
2142 u8 *pdadc_out = ah->ah_txpower.txp_pd_table;
2143 u32 reg;
2144 u8 i;
2145
2146 /* Select the right pdgain curves */
2147
2148 /* Clear current settings */
2149 reg = ath5k_hw_reg_read(ah, AR5K_PHY_TPC_RG1);
2150 reg &= ~(AR5K_PHY_TPC_RG1_PDGAIN_1 |
2151 AR5K_PHY_TPC_RG1_PDGAIN_2 |
2152 AR5K_PHY_TPC_RG1_PDGAIN_3 |
2153 AR5K_PHY_TPC_RG1_NUM_PD_GAIN);
2154
1494 /* 2155 /*
1495 * RF2413 for some reason can't 2156 * Use pd_gains curve from eeprom
1496 * transmit anything if we call
1497 * this funtion, so we skip it
1498 * until we fix txpower.
1499 * 2157 *
1500 * XXX: Assume same for RF2425 2158 * This overrides the default setting from initvals
1501 * to be safe. 2159 * in case some vendors (e.g. Zcomax) don't use the default
2160 * curves. If we don't honor their settings we 'll get a
2161 * 5dB (1 * gain overlap ?) drop.
1502 */ 2162 */
1503 if ((ah->ah_radio == AR5K_RF2413) || (ah->ah_radio == AR5K_RF2425)) 2163 reg |= AR5K_REG_SM(pdcurves, AR5K_PHY_TPC_RG1_NUM_PD_GAIN);
1504 return 0;
1505 2164
1506 /* Reset TX power values */ 2165 switch (pdcurves) {
1507 memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower)); 2166 case 3:
1508 ah->ah_txpower.txp_tpc = tpc; 2167 reg |= AR5K_REG_SM(pdg_to_idx[2], AR5K_PHY_TPC_RG1_PDGAIN_3);
1509 2168 /* Fall through */
1510 /* Initialize TX power table */ 2169 case 2:
1511 ath5k_txpower_table(ah, channel, txpower); 2170 reg |= AR5K_REG_SM(pdg_to_idx[1], AR5K_PHY_TPC_RG1_PDGAIN_2);
2171 /* Fall through */
2172 case 1:
2173 reg |= AR5K_REG_SM(pdg_to_idx[0], AR5K_PHY_TPC_RG1_PDGAIN_1);
2174 break;
2175 }
2176 ath5k_hw_reg_write(ah, reg, AR5K_PHY_TPC_RG1);
1512 2177
1513 /* 2178 /*
1514 * Write TX power values 2179 * Write TX power values
1515 */ 2180 */
1516 for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) { 2181 for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) {
1517 ath5k_hw_reg_write(ah, 2182 ath5k_hw_reg_write(ah,
1518 ((((ah->ah_txpower.txp_pcdac[(i << 1) + 1] << 8) | 0xff) & 0xffff) << 16) | 2183 ((pdadc_out[4*i + 0] & 0xff) << 0) |
1519 (((ah->ah_txpower.txp_pcdac[(i << 1) ] << 8) | 0xff) & 0xffff), 2184 ((pdadc_out[4*i + 1] & 0xff) << 8) |
1520 AR5K_PHY_PCDAC_TXPOWER(i)); 2185 ((pdadc_out[4*i + 2] & 0xff) << 16) |
2186 ((pdadc_out[4*i + 3] & 0xff) << 24),
2187 AR5K_PHY_PDADC_TXPOWER(i));
2188 }
2189}
2190
2191
/*
 * This is the main function that uses all of the above
 * to set PCDAC/PDADC table on hw for the current channel.
 * This table is used for tx power calibration on the baseband,
 * without it we get weird tx power levels and in some cases
 * distorted spectral mask
 */
static int
ath5k_setup_channel_powertable(struct ath5k_hw *ah,
			struct ieee80211_channel *channel,
			u8 ee_mode, u8 type)
{
	struct ath5k_pdgain_info *pdg_L, *pdg_R;
	struct ath5k_chan_pcal_info *pcinfo_L;
	struct ath5k_chan_pcal_info *pcinfo_R;
	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
	u8 *pdg_curve_to_idx = ee->ee_pdc_to_idx[ee_mode];
	s16 table_min[AR5K_EEPROM_N_PD_GAINS];
	s16 table_max[AR5K_EEPROM_N_PD_GAINS];
	u8 *tmpL;
	u8 *tmpR;
	u32 target = channel->center_freq;
	int pdg, i;

	/* Get surrounding freq piers for this channel */
	ath5k_get_chan_pcal_surrounding_piers(ah, channel,
						&pcinfo_L,
						&pcinfo_R);

	/* Loop over pd gain curves on
	 * surrounding freq piers by index */
	for (pdg = 0; pdg < ee->ee_pd_gains[ee_mode]; pdg++) {

		/* Fill curves in reverse order
		 * from lower power (max gain)
		 * to higher power. Use curve -> idx
		 * backmapping we did on eeprom init */
		u8 idx = pdg_curve_to_idx[pdg];

		/* Grab the needed curves by index */
		pdg_L = &pcinfo_L->pd_curves[idx];
		pdg_R = &pcinfo_R->pd_curves[idx];

		/* Initialize the temp tables */
		tmpL = ah->ah_txpower.tmpL[pdg];
		tmpR = ah->ah_txpower.tmpR[pdg];

		/* Set curve's x boundaries and create
		 * curves so that they cover the same
		 * range (if we don't do that one table
		 * will have values on some range and the
		 * other one won't have any so interpolation
		 * will fail) */
		table_min[pdg] = min(pdg_L->pd_pwr[0],
					pdg_R->pd_pwr[0]) / 2;

		table_max[pdg] = max(pdg_L->pd_pwr[pdg_L->pd_points - 1],
				pdg_R->pd_pwr[pdg_R->pd_points - 1]) / 2;

		/* Now create the curves on surrounding channels
		 * and interpolate if needed to get the final
		 * curve for this gain on this channel */
		switch (type) {
		case AR5K_PWRTABLE_LINEAR_PCDAC:
			/* Override min/max so that we don't lose
			 * accuracy (don't divide by 2) */
			table_min[pdg] = min(pdg_L->pd_pwr[0],
						pdg_R->pd_pwr[0]);

			table_max[pdg] =
				max(pdg_L->pd_pwr[pdg_L->pd_points - 1],
					pdg_R->pd_pwr[pdg_R->pd_points - 1]);

			/* Override minimum so that we don't get
			 * out of bounds while extrapolating
			 * below. Don't do this when we have 2
			 * curves and we are on the high power curve
			 * because table_min is ok in this case */
			if (!(ee->ee_pd_gains[ee_mode] > 1 && pdg == 0)) {

				table_min[pdg] =
					ath5k_get_linear_pcdac_min(pdg_L->pd_step,
								pdg_R->pd_step,
								pdg_L->pd_pwr,
								pdg_R->pd_pwr);

				/* Don't go too low because we will
				 * miss the upper part of the curve.
				 * Note: 126 = 31.5dB (max power supported)
				 * in 0.25dB units */
				if (table_max[pdg] - table_min[pdg] > 126)
					table_min[pdg] = table_max[pdg] - 126;
			}

			/* Fall through */
		case AR5K_PWRTABLE_PWR_TO_PCDAC:
		case AR5K_PWRTABLE_PWR_TO_PDADC:

			ath5k_create_power_curve(table_min[pdg],
						table_max[pdg],
						pdg_L->pd_pwr,
						pdg_L->pd_step,
						pdg_L->pd_points, tmpL, type);

			/* We are in a calibration
			 * pier, no need to interpolate
			 * between freq piers */
			if (pcinfo_L == pcinfo_R)
				continue;

			ath5k_create_power_curve(table_min[pdg],
						table_max[pdg],
						pdg_R->pd_pwr,
						pdg_R->pd_step,
						pdg_R->pd_points, tmpR, type);
			break;
		default:
			return -EINVAL;
		}

		/* Interpolate between curves
		 * of surrounding freq piers to
		 * get the final curve for this
		 * pd gain. Re-use tmpL for interpolation
		 * output */
		for (i = 0; (i < (u16) (table_max[pdg] - table_min[pdg])) &&
		(i < AR5K_EEPROM_POWER_TABLE_SIZE); i++) {
			tmpL[i] = (u8) ath5k_get_interpolated_value(target,
							(s16) pcinfo_L->freq,
							(s16) pcinfo_R->freq,
							(s16) tmpL[i],
							(s16) tmpR[i]);
		}
	}

	/* Now we have a set of curves for this
	 * channel on tmpL (x range is table_max - table_min
	 * and y values are tmpL[pdg][]) sorted in the same
	 * order as EEPROM (because we've used the backmapping).
	 * So for RF5112 it's from higher power to lower power
	 * and for RF2413 it's from lower power to higher power.
	 * For RF5111 we only have one curve. */

	/* Fill min and max power levels for this
	 * channel by interpolating the values on
	 * surrounding channels to complete the dataset */
	ah->ah_txpower.txp_min_pwr = ath5k_get_interpolated_value(target,
					(s16) pcinfo_L->freq,
					(s16) pcinfo_R->freq,
					pcinfo_L->min_pwr, pcinfo_R->min_pwr);

	ah->ah_txpower.txp_max_pwr = ath5k_get_interpolated_value(target,
					(s16) pcinfo_L->freq,
					(s16) pcinfo_R->freq,
					pcinfo_L->max_pwr, pcinfo_R->max_pwr);

	/* We are ready to go, fill PCDAC/PDADC
	 * table and write settings on hardware */
	switch (type) {
	case AR5K_PWRTABLE_LINEAR_PCDAC:
		/* For RF5112 we can have one or two curves
		 * and each curve covers a certain power lvl
		 * range so we need to do some more processing */
		ath5k_combine_linear_pcdac_curves(ah, table_min, table_max,
						ee->ee_pd_gains[ee_mode]);

		/* Set txp.offset so that we can
		 * match max power value with max
		 * table index */
		ah->ah_txpower.txp_offset = 64 - (table_max[0] / 2);

		/* Write settings on hw */
		ath5k_setup_pcdac_table(ah);
		break;
	case AR5K_PWRTABLE_PWR_TO_PCDAC:
		/* We are done for RF5111 since it has only
		 * one curve, just fit the curve on the table */
		ath5k_fill_pwr_to_pcdac_table(ah, table_min, table_max);

		/* No rate powertable adjustment for RF5111 */
		ah->ah_txpower.txp_min_idx = 0;
		ah->ah_txpower.txp_offset = 0;

		/* Write settings on hw */
		ath5k_setup_pcdac_table(ah);
		break;
	case AR5K_PWRTABLE_PWR_TO_PDADC:
		/* Set PDADC boundaries and fill
		 * final PDADC table */
		ath5k_combine_pwr_to_pdadc_curves(ah, table_min, table_max,
						ee->ee_pd_gains[ee_mode]);

		/* Write settings on hw */
		ath5k_setup_pwr_to_pdadc_table(ah, pdg, pdg_curve_to_idx);

		/* Set txp.offset, note that table_min
		 * can be negative */
		ah->ah_txpower.txp_offset = table_min[0];
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
2401
2402
2403/*
2404 * Per-rate tx power setting
2405 *
2406 * This is the code that sets the desired tx power (below
2407 * maximum) on hw for each rate (we also have TPC that sets
2408 * power per packet). We do that by providing an index on the
2409 * PCDAC/PDADC table we set up.
2410 */
2411
2412/*
2413 * Set rate power table
2414 *
2415 * For now we only limit txpower based on maximum tx power
2416 * supported by hw (what's inside rate_info). We need to limit
2417 * this even more, based on regulatory domain etc.
2418 *
2419 * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps)
2420 * and is indexed as follows:
2421 * rates[0] - rates[7] -> OFDM rates
2422 * rates[8] - rates[14] -> CCK rates
2423 * rates[15] -> XR rates (they all have the same power)
2424 */
2425static void
2426ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
2427 struct ath5k_rate_pcal_info *rate_info,
2428 u8 ee_mode)
2429{
2430 unsigned int i;
2431 u16 *rates;
2432
2433 /* max_pwr is power level we got from driver/user in 0.5dB
2434 * units, switch to 0.25dB units so we can compare */
2435 max_pwr *= 2;
2436 max_pwr = min(max_pwr, (u16) ah->ah_txpower.txp_max_pwr) / 2;
2437
2438 /* apply rate limits */
2439 rates = ah->ah_txpower.txp_rates_power_table;
2440
2441 /* OFDM rates 6 to 24Mb/s */
2442 for (i = 0; i < 5; i++)
2443 rates[i] = min(max_pwr, rate_info->target_power_6to24);
2444
2445 /* Rest OFDM rates */
2446 rates[5] = min(rates[0], rate_info->target_power_36);
2447 rates[6] = min(rates[0], rate_info->target_power_48);
2448 rates[7] = min(rates[0], rate_info->target_power_54);
2449
2450 /* CCK rates */
2451 /* 1L */
2452 rates[8] = min(rates[0], rate_info->target_power_6to24);
2453 /* 2L */
2454 rates[9] = min(rates[0], rate_info->target_power_36);
2455 /* 2S */
2456 rates[10] = min(rates[0], rate_info->target_power_36);
2457 /* 5L */
2458 rates[11] = min(rates[0], rate_info->target_power_48);
2459 /* 5S */
2460 rates[12] = min(rates[0], rate_info->target_power_48);
2461 /* 11L */
2462 rates[13] = min(rates[0], rate_info->target_power_54);
2463 /* 11S */
2464 rates[14] = min(rates[0], rate_info->target_power_54);
2465
2466 /* XR rates */
2467 rates[15] = min(rates[0], rate_info->target_power_6to24);
2468
2469 /* CCK rates have different peak to average ratio
2470 * so we have to tweak their power so that gainf
2471 * correction works ok. For this we use OFDM to
2472 * CCK delta from eeprom */
2473 if ((ee_mode == AR5K_EEPROM_MODE_11G) &&
2474 (ah->ah_phy_revision < AR5K_SREV_PHY_5212A))
2475 for (i = 8; i <= 15; i++)
2476 rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta;
2477
2478 ah->ah_txpower.txp_min_pwr = rates[7];
2479 ah->ah_txpower.txp_max_pwr = rates[0];
2480 ah->ah_txpower.txp_ofdm = rates[7];
2481}
2482
2483
/*
 * Set transmission power
 */
2487int
2488ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
2489 u8 ee_mode, u8 txpower)
2490{
2491 struct ath5k_rate_pcal_info rate_info;
2492 u8 type;
2493 int ret;
2494
2495 ATH5K_TRACE(ah->ah_sc);
2496 if (txpower > AR5K_TUNE_MAX_TXPOWER) {
2497 ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower);
2498 return -EINVAL;
2499 }
2500 if (txpower == 0)
2501 txpower = AR5K_TUNE_DEFAULT_TXPOWER;
2502
2503 /* Reset TX power values */
2504 memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
2505 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
2506 ah->ah_txpower.txp_min_pwr = 0;
2507 ah->ah_txpower.txp_max_pwr = AR5K_TUNE_MAX_TXPOWER;
2508
2509 /* Initialize TX power table */
2510 switch (ah->ah_radio) {
2511 case AR5K_RF5111:
2512 type = AR5K_PWRTABLE_PWR_TO_PCDAC;
2513 break;
2514 case AR5K_RF5112:
2515 type = AR5K_PWRTABLE_LINEAR_PCDAC;
2516 break;
2517 case AR5K_RF2413:
2518 case AR5K_RF5413:
2519 case AR5K_RF2316:
2520 case AR5K_RF2317:
2521 case AR5K_RF2425:
2522 type = AR5K_PWRTABLE_PWR_TO_PDADC;
2523 break;
2524 default:
2525 return -EINVAL;
2526 }
2527
2528 /* FIXME: Only on channel/mode change */
2529 ret = ath5k_setup_channel_powertable(ah, channel, ee_mode, type);
2530 if (ret)
2531 return ret;
2532
2533 /* Limit max power if we have a CTL available */
2534 ath5k_get_max_ctl_power(ah, channel);
2535
2536 /* FIXME: Tx power limit for this regdomain
2537 * XXX: Mac80211/CRDA will do that anyway ? */
2538
2539 /* FIXME: Antenna reduction stuff */
2540
2541 /* FIXME: Limit power on turbo modes */
2542
2543 /* FIXME: TPC scale reduction */
2544
2545 /* Get surounding channels for per-rate power table
2546 * calibration */
2547 ath5k_get_rate_pcal_data(ah, channel, &rate_info);
2548
2549 /* Setup rate power table */
2550 ath5k_setup_rate_powertable(ah, txpower, &rate_info, ee_mode);
2551
2552 /* Write rate power table on hw */
1523 ath5k_hw_reg_write(ah, AR5K_TXPOWER_OFDM(3, 24) | 2553 ath5k_hw_reg_write(ah, AR5K_TXPOWER_OFDM(3, 24) |
1524 AR5K_TXPOWER_OFDM(2, 16) | AR5K_TXPOWER_OFDM(1, 8) | 2554 AR5K_TXPOWER_OFDM(2, 16) | AR5K_TXPOWER_OFDM(1, 8) |
1525 AR5K_TXPOWER_OFDM(0, 0), AR5K_PHY_TXPOWER_RATE1); 2555 AR5K_TXPOWER_OFDM(0, 0), AR5K_PHY_TXPOWER_RATE1);
@@ -1536,26 +2566,34 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
1536 AR5K_TXPOWER_CCK(13, 16) | AR5K_TXPOWER_CCK(12, 8) | 2566 AR5K_TXPOWER_CCK(13, 16) | AR5K_TXPOWER_CCK(12, 8) |
1537 AR5K_TXPOWER_CCK(11, 0), AR5K_PHY_TXPOWER_RATE4); 2567 AR5K_TXPOWER_CCK(11, 0), AR5K_PHY_TXPOWER_RATE4);
1538 2568
1539 if (ah->ah_txpower.txp_tpc) 2569 /* FIXME: TPC support */
2570 if (ah->ah_txpower.txp_tpc) {
1540 ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX_TPC_ENABLE | 2571 ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX_TPC_ENABLE |
1541 AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX); 2572 AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX);
1542 else 2573
2574 ath5k_hw_reg_write(ah,
2575 AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_ACK) |
2576 AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CTS) |
2577 AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP),
2578 AR5K_TPC);
2579 } else {
1543 ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX | 2580 ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX |
1544 AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX); 2581 AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX);
2582 }
1545 2583
1546 return 0; 2584 return 0;
1547} 2585}
1548 2586
1549int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, unsigned int power) 2587int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 mode, u8 txpower)
1550{ 2588{
1551 /*Just a try M.F.*/ 2589 /*Just a try M.F.*/
1552 struct ieee80211_channel *channel = &ah->ah_current_channel; 2590 struct ieee80211_channel *channel = &ah->ah_current_channel;
1553 2591
1554 ATH5K_TRACE(ah->ah_sc); 2592 ATH5K_TRACE(ah->ah_sc);
1555 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_TXPOWER, 2593 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_TXPOWER,
1556 "changing txpower to %d\n", power); 2594 "changing txpower to %d\n", txpower);
1557 2595
1558 return ath5k_hw_txpower(ah, channel, power); 2596 return ath5k_hw_txpower(ah, channel, mode, txpower);
1559} 2597}
1560 2598
1561#undef _ATH5K_PHY 2599#undef _ATH5K_PHY
diff --git a/drivers/net/wireless/ath5k/reg.h b/drivers/net/wireless/ath5k/reg.h
index 2dc008e10226..7070d1543cdc 100644
--- a/drivers/net/wireless/ath5k/reg.h
+++ b/drivers/net/wireless/ath5k/reg.h
@@ -1554,6 +1554,19 @@
1554/*===5212 Specific PCU registers===*/ 1554/*===5212 Specific PCU registers===*/
1555 1555
1556/* 1556/*
1557 * Transmit power control register
1558 */
1559#define AR5K_TPC 0x80e8
1560#define AR5K_TPC_ACK 0x0000003f /* ack frames */
1561#define AR5K_TPC_ACK_S 0
1562#define AR5K_TPC_CTS 0x00003f00 /* cts frames */
1563#define AR5K_TPC_CTS_S 8
1564#define AR5K_TPC_CHIRP 0x003f0000 /* chirp frames */
1565#define AR5K_TPC_CHIRP_S 16
1566#define AR5K_TPC_DOPPLER 0x0f000000 /* doppler chirp span */
1567#define AR5K_TPC_DOPPLER_S 24
1568
1569/*
1557 * XR (eXtended Range) mode register 1570 * XR (eXtended Range) mode register
1558 */ 1571 */
1559#define AR5K_XRMODE 0x80c0 /* Register Address */ 1572#define AR5K_XRMODE 0x80c0 /* Register Address */
@@ -2550,6 +2563,12 @@
2550#define AR5K_PHY_TPC_RG1 0xa258 2563#define AR5K_PHY_TPC_RG1 0xa258
2551#define AR5K_PHY_TPC_RG1_NUM_PD_GAIN 0x0000c000 2564#define AR5K_PHY_TPC_RG1_NUM_PD_GAIN 0x0000c000
2552#define AR5K_PHY_TPC_RG1_NUM_PD_GAIN_S 14 2565#define AR5K_PHY_TPC_RG1_NUM_PD_GAIN_S 14
2566#define AR5K_PHY_TPC_RG1_PDGAIN_1 0x00030000
2567#define AR5K_PHY_TPC_RG1_PDGAIN_1_S 16
2568#define AR5K_PHY_TPC_RG1_PDGAIN_2 0x000c0000
2569#define AR5K_PHY_TPC_RG1_PDGAIN_2_S 18
2570#define AR5K_PHY_TPC_RG1_PDGAIN_3 0x00300000
2571#define AR5K_PHY_TPC_RG1_PDGAIN_3_S 20
2553 2572
2554#define AR5K_PHY_TPC_RG5 0xa26C 2573#define AR5K_PHY_TPC_RG5 0xa26C
2555#define AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP 0x0000000F 2574#define AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP 0x0000000F
diff --git a/drivers/net/wireless/ath5k/reset.c b/drivers/net/wireless/ath5k/reset.c
index 685dc213edae..7a17d31b2fd9 100644
--- a/drivers/net/wireless/ath5k/reset.c
+++ b/drivers/net/wireless/ath5k/reset.c
@@ -664,29 +664,35 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
664 struct ieee80211_channel *channel, u8 *ant, u8 ee_mode) 664 struct ieee80211_channel *channel, u8 *ant, u8 ee_mode)
665{ 665{
666 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 666 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
667 s16 cck_ofdm_pwr_delta;
667 668
668 /* Set CCK to OFDM power delta */ 669 /* Adjust power delta for channel 14 */
669 if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) { 670 if (channel->center_freq == 2484)
670 int16_t cck_ofdm_pwr_delta; 671 cck_ofdm_pwr_delta =
671 672 ((ee->ee_cck_ofdm_power_delta -
672 /* Adjust power delta for channel 14 */ 673 ee->ee_scaled_cck_delta) * 2) / 10;
673 if (channel->center_freq == 2484) 674 else
674 cck_ofdm_pwr_delta = 675 cck_ofdm_pwr_delta =
675 ((ee->ee_cck_ofdm_power_delta - 676 (ee->ee_cck_ofdm_power_delta * 2) / 10;
676 ee->ee_scaled_cck_delta) * 2) / 10;
677 else
678 cck_ofdm_pwr_delta =
679 (ee->ee_cck_ofdm_power_delta * 2) / 10;
680 677
678 /* Set CCK to OFDM power delta on tx power
679 * adjustment register */
680 if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) {
681 if (channel->hw_value == CHANNEL_G) 681 if (channel->hw_value == CHANNEL_G)
682 ath5k_hw_reg_write(ah, 682 ath5k_hw_reg_write(ah,
683 AR5K_REG_SM((ee->ee_cck_ofdm_power_delta * -1), 683 AR5K_REG_SM((ee->ee_cck_ofdm_gain_delta * -1),
684 AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA) | 684 AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA) |
685 AR5K_REG_SM((cck_ofdm_pwr_delta * -1), 685 AR5K_REG_SM((cck_ofdm_pwr_delta * -1),
686 AR5K_PHY_TX_PWR_ADJ_CCK_PCDAC_INDEX), 686 AR5K_PHY_TX_PWR_ADJ_CCK_PCDAC_INDEX),
687 AR5K_PHY_TX_PWR_ADJ); 687 AR5K_PHY_TX_PWR_ADJ);
688 else 688 else
689 ath5k_hw_reg_write(ah, 0, AR5K_PHY_TX_PWR_ADJ); 689 ath5k_hw_reg_write(ah, 0, AR5K_PHY_TX_PWR_ADJ);
690 } else {
691 /* For older revs we scale power on sw during tx power
692 * setup */
693 ah->ah_txpower.txp_cck_ofdm_pwr_delta = cck_ofdm_pwr_delta;
694 ah->ah_txpower.txp_cck_ofdm_gainf_delta =
695 ee->ee_cck_ofdm_gain_delta;
690 } 696 }
691 697
692 /* Set antenna idle switch table */ 698 /* Set antenna idle switch table */
@@ -994,7 +1000,8 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
994 /* 1000 /*
995 * Set TX power (FIXME) 1001 * Set TX power (FIXME)
996 */ 1002 */
997 ret = ath5k_hw_txpower(ah, channel, AR5K_TUNE_DEFAULT_TXPOWER); 1003 ret = ath5k_hw_txpower(ah, channel, ee_mode,
1004 AR5K_TUNE_DEFAULT_TXPOWER);
998 if (ret) 1005 if (ret)
999 return ret; 1006 return ret;
1000 1007
diff --git a/drivers/net/wireless/ath9k/ahb.c b/drivers/net/wireless/ath9k/ahb.c
index 00cc7bb01f2e..0e65c51ba176 100644
--- a/drivers/net/wireless/ath9k/ahb.c
+++ b/drivers/net/wireless/ath9k/ahb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org> 3 * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
4 * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org> 4 * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
5 * 5 *
diff --git a/drivers/net/wireless/ath9k/ani.c b/drivers/net/wireless/ath9k/ani.c
index a39eb760cbb7..6c5e887d50d7 100644
--- a/drivers/net/wireless/ath9k/ani.c
+++ b/drivers/net/wireless/ath9k/ani.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/ani.h b/drivers/net/wireless/ath9k/ani.h
index 7315761f6d74..08b4e7ed5ff0 100644
--- a/drivers/net/wireless/ath9k/ani.h
+++ b/drivers/net/wireless/ath9k/ani.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
index b64be8e9a690..2689a08a2844 100644
--- a/drivers/net/wireless/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath9k/ath9k.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -295,13 +295,9 @@ struct ath_tx_control {
295 enum ath9k_internal_frame_type frame_type; 295 enum ath9k_internal_frame_type frame_type;
296}; 296};
297 297
298struct ath_xmit_status {
299 int retries;
300 int flags;
301#define ATH_TX_ERROR 0x01 298#define ATH_TX_ERROR 0x01
302#define ATH_TX_XRETRY 0x02 299#define ATH_TX_XRETRY 0x02
303#define ATH_TX_BAR 0x04 300#define ATH_TX_BAR 0x04
304};
305 301
306/* All RSSI values are noise floor adjusted */ 302/* All RSSI values are noise floor adjusted */
307struct ath_tx_stat { 303struct ath_tx_stat {
@@ -390,6 +386,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
390 386
391struct ath_vif { 387struct ath_vif {
392 int av_bslot; 388 int av_bslot;
389 __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
393 enum nl80211_iftype av_opmode; 390 enum nl80211_iftype av_opmode;
394 struct ath_buf *av_bcbuf; 391 struct ath_buf *av_bcbuf;
395 struct ath_tx_control av_btxctl; 392 struct ath_tx_control av_btxctl;
@@ -406,7 +403,7 @@ struct ath_vif {
406 * number of beacon intervals, the game's up. 403 * number of beacon intervals, the game's up.
407 */ 404 */
408#define BSTUCK_THRESH (9 * ATH_BCBUF) 405#define BSTUCK_THRESH (9 * ATH_BCBUF)
409#define ATH_BCBUF 1 406#define ATH_BCBUF 4
410#define ATH_DEFAULT_BINTVAL 100 /* TU */ 407#define ATH_DEFAULT_BINTVAL 100 /* TU */
411#define ATH_DEFAULT_BMISS_LIMIT 10 408#define ATH_DEFAULT_BMISS_LIMIT 10
412#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024) 409#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
index 039c78136c50..ec995730632d 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -70,7 +70,8 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
70 ds = bf->bf_desc; 70 ds = bf->bf_desc;
71 flags = ATH9K_TXDESC_NOACK; 71 flags = ATH9K_TXDESC_NOACK;
72 72
73 if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC && 73 if (((sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
74 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) &&
74 (ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) { 75 (ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
75 ds->ds_link = bf->bf_daddr; /* self-linked */ 76 ds->ds_link = bf->bf_daddr; /* self-linked */
76 flags |= ATH9K_TXDESC_VEOL; 77 flags |= ATH9K_TXDESC_VEOL;
@@ -153,6 +154,8 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
153 bf->bf_mpdu = skb; 154 bf->bf_mpdu = skb;
154 if (skb == NULL) 155 if (skb == NULL)
155 return NULL; 156 return NULL;
157 ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
158 avp->tsf_adjust;
156 159
157 info = IEEE80211_SKB_CB(skb); 160 info = IEEE80211_SKB_CB(skb);
158 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 161 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -253,7 +256,6 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
253{ 256{
254 struct ath_softc *sc = aphy->sc; 257 struct ath_softc *sc = aphy->sc;
255 struct ath_vif *avp; 258 struct ath_vif *avp;
256 struct ieee80211_hdr *hdr;
257 struct ath_buf *bf; 259 struct ath_buf *bf;
258 struct sk_buff *skb; 260 struct sk_buff *skb;
259 __le64 tstamp; 261 __le64 tstamp;
@@ -316,42 +318,33 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
316 318
317 tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; 319 tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
318 sc->beacon.bc_tstamp = le64_to_cpu(tstamp); 320 sc->beacon.bc_tstamp = le64_to_cpu(tstamp);
319 321 /* Calculate a TSF adjustment factor required for staggered beacons. */
320 /*
321 * Calculate a TSF adjustment factor required for
322 * staggered beacons. Note that we assume the format
323 * of the beacon frame leaves the tstamp field immediately
324 * following the header.
325 */
326 if (avp->av_bslot > 0) { 322 if (avp->av_bslot > 0) {
327 u64 tsfadjust; 323 u64 tsfadjust;
328 __le64 val;
329 int intval; 324 int intval;
330 325
331 intval = sc->hw->conf.beacon_int ? 326 intval = sc->hw->conf.beacon_int ?
332 sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL; 327 sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL;
333 328
334 /* 329 /*
335 * The beacon interval is in TU's; the TSF in usecs. 330 * Calculate the TSF offset for this beacon slot, i.e., the
336 * We figure out how many TU's to add to align the 331 * number of usecs that need to be added to the timestamp field
337 * timestamp then convert to TSF units and handle 332 * in Beacon and Probe Response frames. Beacon slot 0 is
338 * byte swapping before writing it in the frame. 333 * processed at the correct offset, so it does not require TSF
339 * The hardware will then add this each time a beacon 334 * adjustment. Other slots are adjusted to get the timestamp
340 * frame is sent. Note that we align vif's 1..N 335 * close to the TBTT for the BSS.
341 * and leave vif 0 untouched. This means vap 0
342 * has a timestamp in one beacon interval while the
343 * others get a timestamp aligned to the next interval.
344 */ 336 */
345 tsfadjust = (intval * (ATH_BCBUF - avp->av_bslot)) / ATH_BCBUF; 337 tsfadjust = intval * avp->av_bslot / ATH_BCBUF;
346 val = cpu_to_le64(tsfadjust << 10); /* TU->TSF */ 338 avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
347 339
348 DPRINTF(sc, ATH_DBG_BEACON, 340 DPRINTF(sc, ATH_DBG_BEACON,
349 "stagger beacons, bslot %d intval %u tsfadjust %llu\n", 341 "stagger beacons, bslot %d intval %u tsfadjust %llu\n",
350 avp->av_bslot, intval, (unsigned long long)tsfadjust); 342 avp->av_bslot, intval, (unsigned long long)tsfadjust);
351 343
352 hdr = (struct ieee80211_hdr *)skb->data; 344 ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
353 memcpy(&hdr[1], &val, sizeof(val)); 345 avp->tsf_adjust;
354 } 346 } else
347 avp->tsf_adjust = cpu_to_le64(0);
355 348
356 bf->bf_mpdu = skb; 349 bf->bf_mpdu = skb;
357 bf->bf_buf_addr = bf->bf_dmacontext = 350 bf->bf_buf_addr = bf->bf_dmacontext =
@@ -447,8 +440,16 @@ void ath_beacon_tasklet(unsigned long data)
447 tsf = ath9k_hw_gettsf64(ah); 440 tsf = ath9k_hw_gettsf64(ah);
448 tsftu = TSF_TO_TU(tsf>>32, tsf); 441 tsftu = TSF_TO_TU(tsf>>32, tsf);
449 slot = ((tsftu % intval) * ATH_BCBUF) / intval; 442 slot = ((tsftu % intval) * ATH_BCBUF) / intval;
450 vif = sc->beacon.bslot[(slot + 1) % ATH_BCBUF]; 443 /*
451 aphy = sc->beacon.bslot_aphy[(slot + 1) % ATH_BCBUF]; 444 * Reverse the slot order to get slot 0 on the TBTT offset that does
445 * not require TSF adjustment and other slots adding
446 * slot/ATH_BCBUF * beacon_int to timestamp. For example, with
447 * ATH_BCBUF = 4, we process beacon slots as follows: 3 2 1 0 3 2 1 ..
448 * and slot 0 is at correct offset to TBTT.
449 */
450 slot = ATH_BCBUF - slot - 1;
451 vif = sc->beacon.bslot[slot];
452 aphy = sc->beacon.bslot_aphy[slot];
452 453
453 DPRINTF(sc, ATH_DBG_BEACON, 454 DPRINTF(sc, ATH_DBG_BEACON,
454 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n", 455 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
@@ -728,6 +729,7 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
728 ath_beacon_config_ap(sc, &conf, avp); 729 ath_beacon_config_ap(sc, &conf, avp);
729 break; 730 break;
730 case NL80211_IFTYPE_ADHOC: 731 case NL80211_IFTYPE_ADHOC:
732 case NL80211_IFTYPE_MESH_POINT:
731 ath_beacon_config_adhoc(sc, &conf, avp, vif); 733 ath_beacon_config_adhoc(sc, &conf, avp, vif);
732 break; 734 break;
733 case NL80211_IFTYPE_STATION: 735 case NL80211_IFTYPE_STATION:
diff --git a/drivers/net/wireless/ath9k/calib.c b/drivers/net/wireless/ath9k/calib.c
index c9446fb6b153..e2d62e97131c 100644
--- a/drivers/net/wireless/ath9k/calib.c
+++ b/drivers/net/wireless/ath9k/calib.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/calib.h b/drivers/net/wireless/ath9k/calib.h
index 32589e0c5018..1c74bd50700d 100644
--- a/drivers/net/wireless/ath9k/calib.h
+++ b/drivers/net/wireless/ath9k/calib.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/debug.c b/drivers/net/wireless/ath9k/debug.c
index 82573cadb1ab..fdf9528fa49b 100644
--- a/drivers/net/wireless/ath9k/debug.c
+++ b/drivers/net/wireless/ath9k/debug.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/debug.h b/drivers/net/wireless/ath9k/debug.h
index 065268b8568f..7b0e5419d2bc 100644
--- a/drivers/net/wireless/ath9k/debug.h
+++ b/drivers/net/wireless/ath9k/debug.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/eeprom.c b/drivers/net/wireless/ath9k/eeprom.c
index 183c949bcca1..ffc36b0361c7 100644
--- a/drivers/net/wireless/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath9k/eeprom.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -342,8 +342,7 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
342static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) 342static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
343{ 343{
344#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) 344#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
345 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; 345 u16 *eep_data = (u16 *)&ah->eeprom.map4k;
346 u16 *eep_data;
347 int addr, eep_start_loc = 0; 346 int addr, eep_start_loc = 0;
348 347
349 eep_start_loc = 64; 348 eep_start_loc = 64;
@@ -353,8 +352,6 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
353 "Reading from EEPROM, not flash\n"); 352 "Reading from EEPROM, not flash\n");
354 } 353 }
355 354
356 eep_data = (u16 *)eep;
357
358 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { 355 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
359 if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data)) { 356 if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data)) {
360 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 357 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
@@ -363,6 +360,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
363 } 360 }
364 eep_data++; 361 eep_data++;
365 } 362 }
363
366 return true; 364 return true;
367#undef SIZE_EEPROM_4K 365#undef SIZE_EEPROM_4K
368} 366}
@@ -379,16 +377,15 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
379 377
380 378
381 if (!ath9k_hw_use_flash(ah)) { 379 if (!ath9k_hw_use_flash(ah)) {
382
383 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, 380 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
384 &magic)) { 381 &magic)) {
385 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 382 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
386 "Reading Magic # failed\n"); 383 "Reading Magic # failed\n");
387 return false; 384 return false;
388 } 385 }
389 386
390 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 387 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
391 "Read Magic = 0x%04X\n", magic); 388 "Read Magic = 0x%04X\n", magic);
392 389
393 if (magic != AR5416_EEPROM_MAGIC) { 390 if (magic != AR5416_EEPROM_MAGIC) {
394 magic2 = swab16(magic); 391 magic2 = swab16(magic);
@@ -401,16 +398,9 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
401 temp = swab16(*eepdata); 398 temp = swab16(*eepdata);
402 *eepdata = temp; 399 *eepdata = temp;
403 eepdata++; 400 eepdata++;
404
405 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
406 "0x%04X ", *eepdata);
407
408 if (((addr + 1) % 6) == 0)
409 DPRINTF(ah->ah_sc,
410 ATH_DBG_EEPROM, "\n");
411 } 401 }
412 } else { 402 } else {
413 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 403 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
414 "Invalid EEPROM Magic. " 404 "Invalid EEPROM Magic. "
415 "endianness mismatch.\n"); 405 "endianness mismatch.\n");
416 return -EINVAL; 406 return -EINVAL;
@@ -426,7 +416,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
426 else 416 else
427 el = ah->eeprom.map4k.baseEepHeader.length; 417 el = ah->eeprom.map4k.baseEepHeader.length;
428 418
429 if (el > sizeof(struct ar5416_eeprom_def)) 419 if (el > sizeof(struct ar5416_eeprom_4k))
430 el = sizeof(struct ar5416_eeprom_4k) / sizeof(u16); 420 el = sizeof(struct ar5416_eeprom_4k) / sizeof(u16);
431 else 421 else
432 el = el / sizeof(u16); 422 el = el / sizeof(u16);
@@ -441,7 +431,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
441 u16 word; 431 u16 word;
442 432
443 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 433 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
444 "EEPROM Endianness is not native.. Changing \n"); 434 "EEPROM Endianness is not native.. Changing\n");
445 435
446 word = swab16(eep->baseEepHeader.length); 436 word = swab16(eep->baseEepHeader.length);
447 eep->baseEepHeader.length = word; 437 eep->baseEepHeader.length = word;
@@ -483,7 +473,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
483 473
484 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || 474 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
485 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { 475 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
486 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 476 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
487 "Bad EEPROM checksum 0x%x or revision 0x%04x\n", 477 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
488 sum, ah->eep_ops->get_eeprom_ver(ah)); 478 sum, ah->eep_ops->get_eeprom_ver(ah));
489 return -EINVAL; 479 return -EINVAL;
@@ -1203,57 +1193,63 @@ static void ath9k_hw_4k_set_addac(struct ath_hw *ah,
1203 } 1193 }
1204} 1194}
1205 1195
1206static bool ath9k_hw_4k_set_board_values(struct ath_hw *ah, 1196static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
1207 struct ath9k_channel *chan) 1197 struct modal_eep_4k_header *pModal,
1198 struct ar5416_eeprom_4k *eep,
1199 u8 txRxAttenLocal, int regChainOffset)
1208{ 1200{
1209 struct modal_eep_4k_header *pModal;
1210 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
1211 int regChainOffset;
1212 u8 txRxAttenLocal;
1213 u8 ob[5], db1[5], db2[5];
1214 u8 ant_div_control1, ant_div_control2;
1215 u32 regVal;
1216
1217
1218 pModal = &eep->modalHeader;
1219
1220 txRxAttenLocal = 23;
1221
1222 REG_WRITE(ah, AR_PHY_SWITCH_COM,
1223 ah->eep_ops->get_eeprom_antenna_cfg(ah, chan));
1224
1225 regChainOffset = 0;
1226 REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset, 1201 REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
1227 pModal->antCtrlChain[0]); 1202 pModal->antCtrlChain[0]);
1228 1203
1229 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset, 1204 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
1230 (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) & 1205 (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) &
1231 ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | 1206 ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
1232 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) | 1207 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
1233 SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) | 1208 SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
1234 SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); 1209 SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
1235 1210
1236 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= 1211 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
1237 AR5416_EEP_MINOR_VER_3) { 1212 AR5416_EEP_MINOR_VER_3) {
1238 txRxAttenLocal = pModal->txRxAttenCh[0]; 1213 txRxAttenLocal = pModal->txRxAttenCh[0];
1214
1239 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, 1215 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1240 AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]); 1216 AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]);
1241 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, 1217 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1242 AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]); 1218 AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]);
1243 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, 1219 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1244 AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, 1220 AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
1245 pModal->xatten2Margin[0]); 1221 pModal->xatten2Margin[0]);
1246 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, 1222 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1247 AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]); 1223 AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]);
1248 } 1224 }
1249 1225
1250 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset, 1226 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset,
1251 AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); 1227 AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
1252 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset, 1228 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset,
1253 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); 1229 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]);
1254 1230
1255 if (AR_SREV_9285_11(ah)) 1231 if (AR_SREV_9285_11(ah))
1256 REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14)); 1232 REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
1233}
1234
1235static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1236 struct ath9k_channel *chan)
1237{
1238 struct modal_eep_4k_header *pModal;
1239 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
1240 u8 txRxAttenLocal;
1241 u8 ob[5], db1[5], db2[5];
1242 u8 ant_div_control1, ant_div_control2;
1243 u32 regVal;
1244
1245 pModal = &eep->modalHeader;
1246 txRxAttenLocal = 23;
1247
1248 REG_WRITE(ah, AR_PHY_SWITCH_COM,
1249 ah->eep_ops->get_eeprom_antenna_cfg(ah, chan));
1250
1251 /* Single chain for 4K EEPROM*/
1252 ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal, 0);
1257 1253
1258 /* Initialize Ant Diversity settings from EEPROM */ 1254 /* Initialize Ant Diversity settings from EEPROM */
1259 if (pModal->version == 3) { 1255 if (pModal->version == 3) {
@@ -1295,9 +1291,6 @@ static bool ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1295 db2[4] = ((pModal->db2_234 >> 8) & 0xf); 1291 db2[4] = ((pModal->db2_234 >> 8) & 0xf);
1296 1292
1297 } else if (pModal->version == 1) { 1293 } else if (pModal->version == 1) {
1298
1299 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1300 "EEPROM Model version is set to 1 \n");
1301 ob[0] = (pModal->ob_01 & 0xf); 1294 ob[0] = (pModal->ob_01 & 0xf);
1302 ob[1] = ob[2] = ob[3] = ob[4] = (pModal->ob_01 >> 4) & 0xf; 1295 ob[1] = ob[2] = ob[3] = ob[4] = (pModal->ob_01 >> 4) & 0xf;
1303 db1[0] = (pModal->db1_01 & 0xf); 1296 db1[0] = (pModal->db1_01 & 0xf);
@@ -1385,8 +1378,6 @@ static bool ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1385 AR_PHY_SETTLING_SWITCH, 1378 AR_PHY_SETTLING_SWITCH,
1386 pModal->swSettleHt40); 1379 pModal->swSettleHt40);
1387 } 1380 }
1388
1389 return true;
1390} 1381}
1391 1382
1392static u16 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah, 1383static u16 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah,
@@ -1464,16 +1455,13 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
1464static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) 1455static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
1465{ 1456{
1466#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16)) 1457#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
1467 struct ar5416_eeprom_def *eep = &ah->eeprom.def; 1458 u16 *eep_data = (u16 *)&ah->eeprom.def;
1468 u16 *eep_data;
1469 int addr, ar5416_eep_start_loc = 0x100; 1459 int addr, ar5416_eep_start_loc = 0x100;
1470 1460
1471 eep_data = (u16 *)eep;
1472
1473 for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) { 1461 for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) {
1474 if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc, 1462 if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc,
1475 eep_data)) { 1463 eep_data)) {
1476 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1464 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
1477 "Unable to read eeprom region\n"); 1465 "Unable to read eeprom region\n");
1478 return false; 1466 return false;
1479 } 1467 }
@@ -1492,17 +1480,14 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
1492 bool need_swap = false; 1480 bool need_swap = false;
1493 int i, addr, size; 1481 int i, addr, size;
1494 1482
1495 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, 1483 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
1496 &magic)) { 1484 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Reading Magic # failed\n");
1497 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1498 "Reading Magic # failed\n");
1499 return false; 1485 return false;
1500 } 1486 }
1501 1487
1502 if (!ath9k_hw_use_flash(ah)) { 1488 if (!ath9k_hw_use_flash(ah)) {
1503
1504 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1489 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1505 "Read Magic = 0x%04X\n", magic); 1490 "Read Magic = 0x%04X\n", magic);
1506 1491
1507 if (magic != AR5416_EEPROM_MAGIC) { 1492 if (magic != AR5416_EEPROM_MAGIC) {
1508 magic2 = swab16(magic); 1493 magic2 = swab16(magic);
@@ -1516,18 +1501,11 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
1516 temp = swab16(*eepdata); 1501 temp = swab16(*eepdata);
1517 *eepdata = temp; 1502 *eepdata = temp;
1518 eepdata++; 1503 eepdata++;
1519
1520 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1521 "0x%04X ", *eepdata);
1522
1523 if (((addr + 1) % 6) == 0)
1524 DPRINTF(ah->ah_sc,
1525 ATH_DBG_EEPROM, "\n");
1526 } 1504 }
1527 } else { 1505 } else {
1528 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1506 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
1529 "Invalid EEPROM Magic. " 1507 "Invalid EEPROM Magic. "
1530 "endianness mismatch.\n"); 1508 "Endianness mismatch.\n");
1531 return -EINVAL; 1509 return -EINVAL;
1532 } 1510 }
1533 } 1511 }
@@ -1556,7 +1534,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
1556 u16 word; 1534 u16 word;
1557 1535
1558 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1536 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1559 "EEPROM Endianness is not native.. Changing \n"); 1537 "EEPROM Endianness is not native.. Changing.\n");
1560 1538
1561 word = swab16(eep->baseEepHeader.length); 1539 word = swab16(eep->baseEepHeader.length);
1562 eep->baseEepHeader.length = word; 1540 eep->baseEepHeader.length = word;
@@ -1602,7 +1580,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
1602 1580
1603 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || 1581 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
1604 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { 1582 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
1605 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1583 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
1606 "Bad EEPROM checksum 0x%x or revision 0x%04x\n", 1584 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
1607 sum, ah->eep_ops->get_eeprom_ver(ah)); 1585 sum, ah->eep_ops->get_eeprom_ver(ah));
1608 return -EINVAL; 1586 return -EINVAL;
@@ -1614,7 +1592,6 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
1614static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah, 1592static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
1615 enum eeprom_param param) 1593 enum eeprom_param param)
1616{ 1594{
1617#define AR5416_VER_MASK (pBase->version & AR5416_EEP_VER_MINOR_MASK)
1618 struct ar5416_eeprom_def *eep = &ah->eeprom.def; 1595 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
1619 struct modal_eep_header *pModal = eep->modalHeader; 1596 struct modal_eep_header *pModal = eep->modalHeader;
1620 struct base_eep_header *pBase = &eep->baseEepHeader; 1597 struct base_eep_header *pBase = &eep->baseEepHeader;
@@ -1681,21 +1658,73 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
1681 default: 1658 default:
1682 return 0; 1659 return 0;
1683 } 1660 }
1684#undef AR5416_VER_MASK
1685} 1661}
1686 1662
1687/* XXX: Clean me up, make me more legible */ 1663static void ath9k_hw_def_set_gain(struct ath_hw *ah,
1688static bool ath9k_hw_def_set_board_values(struct ath_hw *ah, 1664 struct modal_eep_header *pModal,
1665 struct ar5416_eeprom_def *eep,
1666 u8 txRxAttenLocal, int regChainOffset, int i)
1667{
1668 if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
1669 txRxAttenLocal = pModal->txRxAttenCh[i];
1670
1671 if (AR_SREV_9280_10_OR_LATER(ah)) {
1672 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1673 AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
1674 pModal->bswMargin[i]);
1675 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1676 AR_PHY_GAIN_2GHZ_XATTEN1_DB,
1677 pModal->bswAtten[i]);
1678 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1679 AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
1680 pModal->xatten2Margin[i]);
1681 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1682 AR_PHY_GAIN_2GHZ_XATTEN2_DB,
1683 pModal->xatten2Db[i]);
1684 } else {
1685 REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1686 (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
1687 ~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
1688 | SM(pModal-> bswMargin[i],
1689 AR_PHY_GAIN_2GHZ_BSW_MARGIN));
1690 REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1691 (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
1692 ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
1693 | SM(pModal->bswAtten[i],
1694 AR_PHY_GAIN_2GHZ_BSW_ATTEN));
1695 }
1696 }
1697
1698 if (AR_SREV_9280_10_OR_LATER(ah)) {
1699 REG_RMW_FIELD(ah,
1700 AR_PHY_RXGAIN + regChainOffset,
1701 AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
1702 REG_RMW_FIELD(ah,
1703 AR_PHY_RXGAIN + regChainOffset,
1704 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[i]);
1705 } else {
1706 REG_WRITE(ah,
1707 AR_PHY_RXGAIN + regChainOffset,
1708 (REG_READ(ah, AR_PHY_RXGAIN + regChainOffset) &
1709 ~AR_PHY_RXGAIN_TXRX_ATTEN)
1710 | SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN));
1711 REG_WRITE(ah,
1712 AR_PHY_GAIN_2GHZ + regChainOffset,
1713 (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
1714 ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
1715 SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
1716 }
1717}
1718
1719static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
1689 struct ath9k_channel *chan) 1720 struct ath9k_channel *chan)
1690{ 1721{
1691#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK)
1692 struct modal_eep_header *pModal; 1722 struct modal_eep_header *pModal;
1693 struct ar5416_eeprom_def *eep = &ah->eeprom.def; 1723 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
1694 int i, regChainOffset; 1724 int i, regChainOffset;
1695 u8 txRxAttenLocal; 1725 u8 txRxAttenLocal;
1696 1726
1697 pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); 1727 pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
1698
1699 txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44; 1728 txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44;
1700 1729
1701 REG_WRITE(ah, AR_PHY_SWITCH_COM, 1730 REG_WRITE(ah, AR_PHY_SWITCH_COM,
@@ -1708,8 +1737,7 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
1708 } 1737 }
1709 1738
1710 if (AR_SREV_5416_20_OR_LATER(ah) && 1739 if (AR_SREV_5416_20_OR_LATER(ah) &&
1711 (ah->rxchainmask == 5 || ah->txchainmask == 5) 1740 (ah->rxchainmask == 5 || ah->txchainmask == 5) && (i != 0))
1712 && (i != 0))
1713 regChainOffset = (i == 1) ? 0x2000 : 0x1000; 1741 regChainOffset = (i == 1) ? 0x2000 : 0x1000;
1714 else 1742 else
1715 regChainOffset = i * 0x1000; 1743 regChainOffset = i * 0x1000;
@@ -1718,9 +1746,7 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
1718 pModal->antCtrlChain[i]); 1746 pModal->antCtrlChain[i]);
1719 1747
1720 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset, 1748 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
1721 (REG_READ(ah, 1749 (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) &
1722 AR_PHY_TIMING_CTRL4(0) +
1723 regChainOffset) &
1724 ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | 1750 ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
1725 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) | 1751 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
1726 SM(pModal->iqCalICh[i], 1752 SM(pModal->iqCalICh[i],
@@ -1728,87 +1754,9 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
1728 SM(pModal->iqCalQCh[i], 1754 SM(pModal->iqCalQCh[i],
1729 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); 1755 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
1730 1756
1731 if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) { 1757 if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah))
1732 if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) { 1758 ath9k_hw_def_set_gain(ah, pModal, eep, txRxAttenLocal,
1733 txRxAttenLocal = pModal->txRxAttenCh[i]; 1759 regChainOffset, i);
1734 if (AR_SREV_9280_10_OR_LATER(ah)) {
1735 REG_RMW_FIELD(ah,
1736 AR_PHY_GAIN_2GHZ +
1737 regChainOffset,
1738 AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
1739 pModal->
1740 bswMargin[i]);
1741 REG_RMW_FIELD(ah,
1742 AR_PHY_GAIN_2GHZ +
1743 regChainOffset,
1744 AR_PHY_GAIN_2GHZ_XATTEN1_DB,
1745 pModal->
1746 bswAtten[i]);
1747 REG_RMW_FIELD(ah,
1748 AR_PHY_GAIN_2GHZ +
1749 regChainOffset,
1750 AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
1751 pModal->
1752 xatten2Margin[i]);
1753 REG_RMW_FIELD(ah,
1754 AR_PHY_GAIN_2GHZ +
1755 regChainOffset,
1756 AR_PHY_GAIN_2GHZ_XATTEN2_DB,
1757 pModal->
1758 xatten2Db[i]);
1759 } else {
1760 REG_WRITE(ah,
1761 AR_PHY_GAIN_2GHZ +
1762 regChainOffset,
1763 (REG_READ(ah,
1764 AR_PHY_GAIN_2GHZ +
1765 regChainOffset) &
1766 ~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
1767 | SM(pModal->
1768 bswMargin[i],
1769 AR_PHY_GAIN_2GHZ_BSW_MARGIN));
1770 REG_WRITE(ah,
1771 AR_PHY_GAIN_2GHZ +
1772 regChainOffset,
1773 (REG_READ(ah,
1774 AR_PHY_GAIN_2GHZ +
1775 regChainOffset) &
1776 ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
1777 | SM(pModal->bswAtten[i],
1778 AR_PHY_GAIN_2GHZ_BSW_ATTEN));
1779 }
1780 }
1781 if (AR_SREV_9280_10_OR_LATER(ah)) {
1782 REG_RMW_FIELD(ah,
1783 AR_PHY_RXGAIN +
1784 regChainOffset,
1785 AR9280_PHY_RXGAIN_TXRX_ATTEN,
1786 txRxAttenLocal);
1787 REG_RMW_FIELD(ah,
1788 AR_PHY_RXGAIN +
1789 regChainOffset,
1790 AR9280_PHY_RXGAIN_TXRX_MARGIN,
1791 pModal->rxTxMarginCh[i]);
1792 } else {
1793 REG_WRITE(ah,
1794 AR_PHY_RXGAIN + regChainOffset,
1795 (REG_READ(ah,
1796 AR_PHY_RXGAIN +
1797 regChainOffset) &
1798 ~AR_PHY_RXGAIN_TXRX_ATTEN) |
1799 SM(txRxAttenLocal,
1800 AR_PHY_RXGAIN_TXRX_ATTEN));
1801 REG_WRITE(ah,
1802 AR_PHY_GAIN_2GHZ +
1803 regChainOffset,
1804 (REG_READ(ah,
1805 AR_PHY_GAIN_2GHZ +
1806 regChainOffset) &
1807 ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
1808 SM(pModal->rxTxMarginCh[i],
1809 AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
1810 }
1811 }
1812 } 1760 }
1813 1761
1814 if (AR_SREV_9280_10_OR_LATER(ah)) { 1762 if (AR_SREV_9280_10_OR_LATER(ah)) {
@@ -1855,8 +1803,6 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
1855 AR_AN_TOP2_LOCALBIAS, 1803 AR_AN_TOP2_LOCALBIAS,
1856 AR_AN_TOP2_LOCALBIAS_S, 1804 AR_AN_TOP2_LOCALBIAS_S,
1857 pModal->local_bias); 1805 pModal->local_bias);
1858 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "ForceXPAon: %d\n",
1859 pModal->force_xpaon);
1860 REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG, 1806 REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
1861 pModal->force_xpaon); 1807 pModal->force_xpaon);
1862 } 1808 }
@@ -1882,6 +1828,7 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
1882 1828
1883 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, 1829 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
1884 pModal->txEndToRxOn); 1830 pModal->txEndToRxOn);
1831
1885 if (AR_SREV_9280_10_OR_LATER(ah)) { 1832 if (AR_SREV_9280_10_OR_LATER(ah)) {
1886 REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62, 1833 REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
1887 pModal->thresh62); 1834 pModal->thresh62);
@@ -1912,10 +1859,10 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
1912 } 1859 }
1913 1860
1914 if (AR_SREV_9280_20_OR_LATER(ah) && 1861 if (AR_SREV_9280_20_OR_LATER(ah) &&
1915 AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19) 1862 AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19)
1916 REG_RMW_FIELD(ah, AR_PHY_CCK_TX_CTRL, 1863 REG_RMW_FIELD(ah, AR_PHY_CCK_TX_CTRL,
1917 AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK, 1864 AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK,
1918 pModal->miscBits); 1865 pModal->miscBits);
1919 1866
1920 1867
1921 if (AR_SREV_9280_20(ah) && AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20) { 1868 if (AR_SREV_9280_20(ah) && AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20) {
@@ -1926,18 +1873,15 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
1926 REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, 0); 1873 REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, 0);
1927 else 1874 else
1928 REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, 1875 REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE,
1929 eep->baseEepHeader.dacLpMode); 1876 eep->baseEepHeader.dacLpMode);
1930 1877
1931 REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL, AR_PHY_FRAME_CTL_TX_CLIP, 1878 REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL, AR_PHY_FRAME_CTL_TX_CLIP,
1932 pModal->miscBits >> 2); 1879 pModal->miscBits >> 2);
1933 1880
1934 REG_RMW_FIELD(ah, AR_PHY_TX_PWRCTRL9, 1881 REG_RMW_FIELD(ah, AR_PHY_TX_PWRCTRL9,
1935 AR_PHY_TX_DESIRED_SCALE_CCK, 1882 AR_PHY_TX_DESIRED_SCALE_CCK,
1936 eep->baseEepHeader.desiredScaleCCK); 1883 eep->baseEepHeader.desiredScaleCCK);
1937 } 1884 }
1938
1939 return true;
1940#undef AR5416_VER_MASK
1941} 1885}
1942 1886
1943static void ath9k_hw_def_set_addac(struct ath_hw *ah, 1887static void ath9k_hw_def_set_addac(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath9k/eeprom.h b/drivers/net/wireless/ath9k/eeprom.h
index d6f6108f63c7..25b68c881ff1 100644
--- a/drivers/net/wireless/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath9k/eeprom.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -95,6 +95,7 @@
95#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5)) 95#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
96#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM)) 96#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
97 97
98#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK)
98#define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \ 99#define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \
99 ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) 100 ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
100 101
@@ -489,7 +490,7 @@ struct eeprom_ops {
489 u8 (*get_num_ant_config)(struct ath_hw *hw, enum ieee80211_band band); 490 u8 (*get_num_ant_config)(struct ath_hw *hw, enum ieee80211_band band);
490 u16 (*get_eeprom_antenna_cfg)(struct ath_hw *hw, 491 u16 (*get_eeprom_antenna_cfg)(struct ath_hw *hw,
491 struct ath9k_channel *chan); 492 struct ath9k_channel *chan);
492 bool (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan); 493 void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan);
493 void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan); 494 void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan);
494 int (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan, 495 int (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan,
495 u16 cfgCtl, u8 twiceAntennaReduction, 496 u16 cfgCtl, u8 twiceAntennaReduction,
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
index d494e98ba971..b15eaf8417ff 100644
--- a/drivers/net/wireless/ath9k/hw.c
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -588,6 +588,10 @@ static int ath9k_hw_post_attach(struct ath_hw *ah)
588 ecode = ath9k_hw_eeprom_attach(ah); 588 ecode = ath9k_hw_eeprom_attach(ah);
589 if (ecode != 0) 589 if (ecode != 0)
590 return ecode; 590 return ecode;
591
592 DPRINTF(ah->ah_sc, ATH_DBG_CONFIG, "Eeprom VER: %d, REV: %d\n",
593 ah->eep_ops->get_eeprom_ver(ah), ah->eep_ops->get_eeprom_rev(ah));
594
591 ecode = ath9k_hw_rfattach(ah); 595 ecode = ath9k_hw_rfattach(ah);
592 if (ecode != 0) 596 if (ecode != 0)
593 return ecode; 597 return ecode;
@@ -1444,6 +1448,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
1444 REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); 1448 REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
1445 break; 1449 break;
1446 case NL80211_IFTYPE_ADHOC: 1450 case NL80211_IFTYPE_ADHOC:
1451 case NL80211_IFTYPE_MESH_POINT:
1447 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC 1452 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC
1448 | AR_STA_ID1_KSRCH_MODE); 1453 | AR_STA_ID1_KSRCH_MODE);
1449 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); 1454 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
@@ -2273,11 +2278,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2273 else 2278 else
2274 ath9k_hw_spur_mitigate(ah, chan); 2279 ath9k_hw_spur_mitigate(ah, chan);
2275 2280
2276 if (!ah->eep_ops->set_board_values(ah, chan)) { 2281 ah->eep_ops->set_board_values(ah, chan);
2277 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
2278 "error setting board options\n");
2279 return -EIO;
2280 }
2281 2282
2282 ath9k_hw_decrease_chain_power(ah, chan); 2283 ath9k_hw_decrease_chain_power(ah, chan);
2283 2284
@@ -3149,6 +3150,7 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
3149 flags |= AR_TBTT_TIMER_EN; 3150 flags |= AR_TBTT_TIMER_EN;
3150 break; 3151 break;
3151 case NL80211_IFTYPE_ADHOC: 3152 case NL80211_IFTYPE_ADHOC:
3153 case NL80211_IFTYPE_MESH_POINT:
3152 REG_SET_BIT(ah, AR_TXCFG, 3154 REG_SET_BIT(ah, AR_TXCFG,
3153 AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY); 3155 AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
3154 REG_WRITE(ah, AR_NEXT_NDP_TIMER, 3156 REG_WRITE(ah, AR_NEXT_NDP_TIMER,
diff --git a/drivers/net/wireless/ath9k/hw.h b/drivers/net/wireless/ath9k/hw.h
index dc681f011fdf..0b594e0ee260 100644
--- a/drivers/net/wireless/ath9k/hw.h
+++ b/drivers/net/wireless/ath9k/hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/initvals.h b/drivers/net/wireless/ath9k/initvals.h
index 1d60c3706f1c..e2f0a34b79a1 100644
--- a/drivers/net/wireless/ath9k/initvals.h
+++ b/drivers/net/wireless/ath9k/initvals.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/mac.c b/drivers/net/wireless/ath9k/mac.c
index f757bc7eec68..e0a6dee45839 100644
--- a/drivers/net/wireless/ath9k/mac.c
+++ b/drivers/net/wireless/ath9k/mac.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/mac.h b/drivers/net/wireless/ath9k/mac.h
index a75f65dae1d7..1176bce8b76c 100644
--- a/drivers/net/wireless/ath9k/mac.h
+++ b/drivers/net/wireless/ath9k/mac.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index 8db75f6de53e..13d4e6756c99 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -940,18 +940,25 @@ static void ath_led_blink_work(struct work_struct *work)
940 940
941 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED)) 941 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
942 return; 942 return;
943 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 943
944 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0); 944 if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
945 (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
946 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0);
947 else
948 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
949 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
945 950
946 queue_delayed_work(sc->hw->workqueue, &sc->ath_led_blink_work, 951 queue_delayed_work(sc->hw->workqueue, &sc->ath_led_blink_work,
947 (sc->sc_flags & SC_OP_LED_ON) ? 952 (sc->sc_flags & SC_OP_LED_ON) ?
948 msecs_to_jiffies(sc->led_off_duration) : 953 msecs_to_jiffies(sc->led_off_duration) :
949 msecs_to_jiffies(sc->led_on_duration)); 954 msecs_to_jiffies(sc->led_on_duration));
950 955
951 sc->led_on_duration = 956 sc->led_on_duration = sc->led_on_cnt ?
952 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25); 957 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
953 sc->led_off_duration = 958 ATH_LED_ON_DURATION_IDLE;
954 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10); 959 sc->led_off_duration = sc->led_off_cnt ?
960 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
961 ATH_LED_OFF_DURATION_IDLE;
955 sc->led_on_cnt = sc->led_off_cnt = 0; 962 sc->led_on_cnt = sc->led_off_cnt = 0;
956 if (sc->sc_flags & SC_OP_LED_ON) 963 if (sc->sc_flags & SC_OP_LED_ON)
957 sc->sc_flags &= ~SC_OP_LED_ON; 964 sc->sc_flags &= ~SC_OP_LED_ON;
@@ -1592,7 +1599,8 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1592 hw->wiphy->interface_modes = 1599 hw->wiphy->interface_modes =
1593 BIT(NL80211_IFTYPE_AP) | 1600 BIT(NL80211_IFTYPE_AP) |
1594 BIT(NL80211_IFTYPE_STATION) | 1601 BIT(NL80211_IFTYPE_STATION) |
1595 BIT(NL80211_IFTYPE_ADHOC); 1602 BIT(NL80211_IFTYPE_ADHOC) |
1603 BIT(NL80211_IFTYPE_MESH_POINT);
1596 1604
1597 hw->wiphy->reg_notifier = ath9k_reg_notifier; 1605 hw->wiphy->reg_notifier = ath9k_reg_notifier;
1598 hw->wiphy->strict_regulatory = true; 1606 hw->wiphy->strict_regulatory = true;
@@ -2200,18 +2208,13 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2200 ic_opmode = NL80211_IFTYPE_STATION; 2208 ic_opmode = NL80211_IFTYPE_STATION;
2201 break; 2209 break;
2202 case NL80211_IFTYPE_ADHOC: 2210 case NL80211_IFTYPE_ADHOC:
2203 if (sc->nbcnvifs >= ATH_BCBUF) {
2204 ret = -ENOBUFS;
2205 goto out;
2206 }
2207 ic_opmode = NL80211_IFTYPE_ADHOC;
2208 break;
2209 case NL80211_IFTYPE_AP: 2211 case NL80211_IFTYPE_AP:
2212 case NL80211_IFTYPE_MESH_POINT:
2210 if (sc->nbcnvifs >= ATH_BCBUF) { 2213 if (sc->nbcnvifs >= ATH_BCBUF) {
2211 ret = -ENOBUFS; 2214 ret = -ENOBUFS;
2212 goto out; 2215 goto out;
2213 } 2216 }
2214 ic_opmode = NL80211_IFTYPE_AP; 2217 ic_opmode = conf->type;
2215 break; 2218 break;
2216 default: 2219 default:
2217 DPRINTF(sc, ATH_DBG_FATAL, 2220 DPRINTF(sc, ATH_DBG_FATAL,
@@ -2247,7 +2250,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2247 * Note we only do this (at the moment) for station mode. 2250 * Note we only do this (at the moment) for station mode.
2248 */ 2251 */
2249 if ((conf->type == NL80211_IFTYPE_STATION) || 2252 if ((conf->type == NL80211_IFTYPE_STATION) ||
2250 (conf->type == NL80211_IFTYPE_ADHOC)) { 2253 (conf->type == NL80211_IFTYPE_ADHOC) ||
2254 (conf->type == NL80211_IFTYPE_MESH_POINT)) {
2251 if (ath9k_hw_phycounters(sc->sc_ah)) 2255 if (ath9k_hw_phycounters(sc->sc_ah))
2252 sc->imask |= ATH9K_INT_MIB; 2256 sc->imask |= ATH9K_INT_MIB;
2253 sc->imask |= ATH9K_INT_TSFOOR; 2257 sc->imask |= ATH9K_INT_TSFOOR;
@@ -2294,8 +2298,9 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
2294 del_timer_sync(&sc->ani.timer); 2298 del_timer_sync(&sc->ani.timer);
2295 2299
2296 /* Reclaim beacon resources */ 2300 /* Reclaim beacon resources */
2297 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP || 2301 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
2298 sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) { 2302 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
2303 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
2299 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 2304 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2300 ath_beacon_return(sc, avp); 2305 ath_beacon_return(sc, avp);
2301 } 2306 }
@@ -2428,6 +2433,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
2428 switch (vif->type) { 2433 switch (vif->type) {
2429 case NL80211_IFTYPE_STATION: 2434 case NL80211_IFTYPE_STATION:
2430 case NL80211_IFTYPE_ADHOC: 2435 case NL80211_IFTYPE_ADHOC:
2436 case NL80211_IFTYPE_MESH_POINT:
2431 /* Set BSSID */ 2437 /* Set BSSID */
2432 memcpy(sc->curbssid, conf->bssid, ETH_ALEN); 2438 memcpy(sc->curbssid, conf->bssid, ETH_ALEN);
2433 memcpy(avp->bssid, conf->bssid, ETH_ALEN); 2439 memcpy(avp->bssid, conf->bssid, ETH_ALEN);
@@ -2451,7 +2457,8 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
2451 } 2457 }
2452 2458
2453 if ((vif->type == NL80211_IFTYPE_ADHOC) || 2459 if ((vif->type == NL80211_IFTYPE_ADHOC) ||
2454 (vif->type == NL80211_IFTYPE_AP)) { 2460 (vif->type == NL80211_IFTYPE_AP) ||
2461 (vif->type == NL80211_IFTYPE_MESH_POINT)) {
2455 if ((conf->changed & IEEE80211_IFCC_BEACON) || 2462 if ((conf->changed & IEEE80211_IFCC_BEACON) ||
2456 (conf->changed & IEEE80211_IFCC_BEACON_ENABLED && 2463 (conf->changed & IEEE80211_IFCC_BEACON_ENABLED &&
2457 conf->enable_beacon)) { 2464 conf->enable_beacon)) {
@@ -2723,7 +2730,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2723 2730
2724 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid); 2731 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
2725 break; 2732 break;
2726 case IEEE80211_AMPDU_TX_RESUME: 2733 case IEEE80211_AMPDU_TX_OPERATIONAL:
2727 ath_tx_aggr_resume(sc, sta, tid); 2734 ath_tx_aggr_resume(sc, sta, tid);
2728 break; 2735 break;
2729 default: 2736 default:
diff --git a/drivers/net/wireless/ath9k/pci.c b/drivers/net/wireless/ath9k/pci.c
index 9a58baabb9ca..6dbc58580abb 100644
--- a/drivers/net/wireless/ath9k/pci.c
+++ b/drivers/net/wireless/ath9k/pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -87,7 +87,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
87 struct ath_softc *sc; 87 struct ath_softc *sc;
88 struct ieee80211_hw *hw; 88 struct ieee80211_hw *hw;
89 u8 csz; 89 u8 csz;
90 u32 val;
91 int ret = 0; 90 int ret = 0;
92 struct ath_hw *ah; 91 struct ath_hw *ah;
93 92
@@ -134,14 +133,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
134 133
135 pci_set_master(pdev); 134 pci_set_master(pdev);
136 135
137 /*
138 * Disable the RETRY_TIMEOUT register (0x41) to keep
139 * PCI Tx retries from interfering with C3 CPU state.
140 */
141 pci_read_config_dword(pdev, 0x40, &val);
142 if ((val & 0x0000ff00) != 0)
143 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
144
145 ret = pci_request_region(pdev, 0, "ath9k"); 136 ret = pci_request_region(pdev, 0, "ath9k");
146 if (ret) { 137 if (ret) {
147 dev_err(&pdev->dev, "PCI memory region reserve error\n"); 138 dev_err(&pdev->dev, "PCI memory region reserve error\n");
@@ -253,21 +244,12 @@ static int ath_pci_resume(struct pci_dev *pdev)
253 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 244 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
254 struct ath_wiphy *aphy = hw->priv; 245 struct ath_wiphy *aphy = hw->priv;
255 struct ath_softc *sc = aphy->sc; 246 struct ath_softc *sc = aphy->sc;
256 u32 val;
257 int err; 247 int err;
258 248
259 err = pci_enable_device(pdev); 249 err = pci_enable_device(pdev);
260 if (err) 250 if (err)
261 return err; 251 return err;
262 pci_restore_state(pdev); 252 pci_restore_state(pdev);
263 /*
264 * Suspend/Resume resets the PCI configuration space, so we have to
265 * re-disable the RETRY_TIMEOUT register (0x41) to keep
266 * PCI Tx retries from interfering with C3 CPU state
267 */
268 pci_read_config_dword(pdev, 0x40, &val);
269 if ((val & 0x0000ff00) != 0)
270 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
271 253
272 /* Enable LED */ 254 /* Enable LED */
273 ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN, 255 ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
diff --git a/drivers/net/wireless/ath9k/phy.c b/drivers/net/wireless/ath9k/phy.c
index e1494bae0f9f..8bcba906929a 100644
--- a/drivers/net/wireless/ath9k/phy.c
+++ b/drivers/net/wireless/ath9k/phy.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/phy.h b/drivers/net/wireless/ath9k/phy.h
index 1eac8c707342..0f7f8e0c9c95 100644
--- a/drivers/net/wireless/ath9k/phy.h
+++ b/drivers/net/wireless/ath9k/phy.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c
index 832735677a46..824ccbb8b7b8 100644
--- a/drivers/net/wireless/ath9k/rc.c
+++ b/drivers/net/wireless/ath9k/rc.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Video54 Technologies, Inc. 2 * Copyright (c) 2004 Video54 Technologies, Inc.
3 * Copyright (c) 2004-2008 Atheros Communications, Inc. 3 * Copyright (c) 2004-2009 Atheros Communications, Inc.
4 * 4 *
5 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -864,6 +864,8 @@ static void ath_rc_ratefind(struct ath_softc *sc,
864 rate_table, nrix, 1, 0); 864 rate_table, nrix, 1, 0);
865 ath_rc_rate_set_series(rate_table, &rates[i++], txrc, 865 ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
866 try_per_rate, nrix, 0); 866 try_per_rate, nrix, 0);
867
868 tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
867 } else { 869 } else {
868 try_per_rate = (ATH_11N_TXMAXTRY/4); 870 try_per_rate = (ATH_11N_TXMAXTRY/4);
869 /* Set the choosen rate. No RTS for first series entry. */ 871 /* Set the choosen rate. No RTS for first series entry. */
@@ -1468,16 +1470,18 @@ static void ath_rc_init(struct ath_softc *sc,
1468 ath_rc_priv->ht_cap); 1470 ath_rc_priv->ht_cap);
1469} 1471}
1470 1472
1471static u8 ath_rc_build_ht_caps(struct ath_softc *sc, bool is_ht, bool is_cw40, 1473static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
1472 bool is_sgi40) 1474 bool is_cw40, bool is_sgi40)
1473{ 1475{
1474 u8 caps = 0; 1476 u8 caps = 0;
1475 1477
1476 if (is_ht) { 1478 if (sta->ht_cap.ht_supported) {
1477 caps = WLAN_RC_HT_FLAG; 1479 caps = WLAN_RC_HT_FLAG;
1478 if (sc->sc_ah->caps.tx_chainmask != 1 && 1480 if (sc->sc_ah->caps.tx_chainmask != 1 &&
1479 ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_DS, 0, NULL)) 1481 ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_DS, 0, NULL)) {
1480 caps |= WLAN_RC_DS_FLAG; 1482 if (sta->ht_cap.mcs.rx_mask[1])
1483 caps |= WLAN_RC_DS_FLAG;
1484 }
1481 if (is_cw40) 1485 if (is_cw40)
1482 caps |= WLAN_RC_40_FLAG; 1486 caps |= WLAN_RC_40_FLAG;
1483 if (is_sgi40) 1487 if (is_sgi40)
@@ -1615,6 +1619,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1615 /* Choose rate table first */ 1619 /* Choose rate table first */
1616 1620
1617 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) || 1621 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) ||
1622 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) ||
1618 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)) { 1623 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)) {
1619 rate_table = ath_choose_rate_table(sc, sband->band, 1624 rate_table = ath_choose_rate_table(sc, sband->band,
1620 sta->ht_cap.ht_supported, 1625 sta->ht_cap.ht_supported,
@@ -1624,8 +1629,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1624 rate_table = sc->cur_rate_table; 1629 rate_table = sc->cur_rate_table;
1625 } 1630 }
1626 1631
1627 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta->ht_cap.ht_supported, 1632 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi40);
1628 is_cw40, is_sgi40);
1629 ath_rc_init(sc, priv_sta, sband, sta, rate_table); 1633 ath_rc_init(sc, priv_sta, sband, sta, rate_table);
1630} 1634}
1631 1635
@@ -1659,8 +1663,7 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1659 rate_table = ath_choose_rate_table(sc, sband->band, 1663 rate_table = ath_choose_rate_table(sc, sband->band,
1660 sta->ht_cap.ht_supported, 1664 sta->ht_cap.ht_supported,
1661 oper_cw40); 1665 oper_cw40);
1662 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, 1666 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta,
1663 sta->ht_cap.ht_supported,
1664 oper_cw40, oper_sgi40); 1667 oper_cw40, oper_sgi40);
1665 ath_rc_init(sc, priv_sta, sband, sta, rate_table); 1668 ath_rc_init(sc, priv_sta, sband, sta, rate_table);
1666 1669
diff --git a/drivers/net/wireless/ath9k/rc.h b/drivers/net/wireless/ath9k/rc.h
index db9b0b9a3431..199a3ce57d64 100644
--- a/drivers/net/wireless/ath9k/rc.h
+++ b/drivers/net/wireless/ath9k/rc.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004 Sam Leffler, Errno Consulting 2 * Copyright (c) 2004 Sam Leffler, Errno Consulting
3 * Copyright (c) 2004 Video54 Technologies, Inc. 3 * Copyright (c) 2004 Video54 Technologies, Inc.
4 * Copyright (c) 2008 Atheros Communications Inc. 4 * Copyright (c) 2008-2009 Atheros Communications Inc.
5 * 5 *
6 * Permission to use, copy, modify, and/or distribute this software for any 6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above 7 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 0bba17662a1f..71cb18d6757d 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -344,8 +344,13 @@ void ath_rx_cleanup(struct ath_softc *sc)
344 344
345 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 345 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
346 skb = bf->bf_mpdu; 346 skb = bf->bf_mpdu;
347 if (skb) 347 if (skb) {
348 dma_unmap_single(sc->dev,
349 bf->bf_buf_addr,
350 sc->rx.bufsize,
351 DMA_FROM_DEVICE);
348 dev_kfree_skb(skb); 352 dev_kfree_skb(skb);
353 }
349 } 354 }
350 355
351 if (sc->rx.rxdma.dd_desc_len != 0) 356 if (sc->rx.rxdma.dd_desc_len != 0)
diff --git a/drivers/net/wireless/ath9k/reg.h b/drivers/net/wireless/ath9k/reg.h
index d86e90e38173..52605246679f 100644
--- a/drivers/net/wireless/ath9k/reg.h
+++ b/drivers/net/wireless/ath9k/reg.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/regd.c b/drivers/net/wireless/ath9k/regd.c
index b8f9b6d6bec4..4ca625102291 100644
--- a/drivers/net/wireless/ath9k/regd.c
+++ b/drivers/net/wireless/ath9k/regd.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/regd.h b/drivers/net/wireless/ath9k/regd.h
index 8f885f3bc8df..9f5fbd4eea7a 100644
--- a/drivers/net/wireless/ath9k/regd.h
+++ b/drivers/net/wireless/ath9k/regd.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/regd_common.h b/drivers/net/wireless/ath9k/regd_common.h
index b41d0002f3fe..4d0e298cd1c7 100644
--- a/drivers/net/wireless/ath9k/regd_common.h
+++ b/drivers/net/wireless/ath9k/regd_common.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index e3f376611f85..689bdbf78808 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -64,6 +64,10 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
64static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 64static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
65 struct list_head *head); 65 struct list_head *head);
66static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf); 66static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
67static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
68 int txok);
69static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
70 int nbad, int txok, bool update_rc);
67 71
68/*********************/ 72/*********************/
69/* Aggregation logic */ 73/* Aggregation logic */
@@ -274,9 +278,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
274 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; 278 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
275 struct ath_desc *ds = bf_last->bf_desc; 279 struct ath_desc *ds = bf_last->bf_desc;
276 struct list_head bf_head, bf_pending; 280 struct list_head bf_head, bf_pending;
277 u16 seq_st = 0; 281 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
278 u32 ba[WME_BA_BMP_SIZE >> 5]; 282 u32 ba[WME_BA_BMP_SIZE >> 5];
279 int isaggr, txfail, txpending, sendbar = 0, needreset = 0; 283 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
284 bool rc_update = true;
280 285
281 skb = (struct sk_buff *)bf->bf_mpdu; 286 skb = (struct sk_buff *)bf->bf_mpdu;
282 hdr = (struct ieee80211_hdr *)skb->data; 287 hdr = (struct ieee80211_hdr *)skb->data;
@@ -316,6 +321,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
316 INIT_LIST_HEAD(&bf_pending); 321 INIT_LIST_HEAD(&bf_pending);
317 INIT_LIST_HEAD(&bf_head); 322 INIT_LIST_HEAD(&bf_head);
318 323
324 nbad = ath_tx_num_badfrms(sc, bf, txok);
319 while (bf) { 325 while (bf) {
320 txfail = txpending = 0; 326 txfail = txpending = 0;
321 bf_next = bf->bf_next; 327 bf_next = bf->bf_next;
@@ -323,8 +329,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
323 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) { 329 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
324 /* transmit completion, subframe is 330 /* transmit completion, subframe is
325 * acked by block ack */ 331 * acked by block ack */
332 acked_cnt++;
326 } else if (!isaggr && txok) { 333 } else if (!isaggr && txok) {
327 /* transmit completion */ 334 /* transmit completion */
335 acked_cnt++;
328 } else { 336 } else {
329 if (!(tid->state & AGGR_CLEANUP) && 337 if (!(tid->state & AGGR_CLEANUP) &&
330 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) { 338 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
@@ -335,6 +343,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
335 bf->bf_state.bf_type |= BUF_XRETRY; 343 bf->bf_state.bf_type |= BUF_XRETRY;
336 txfail = 1; 344 txfail = 1;
337 sendbar = 1; 345 sendbar = 1;
346 txfail_cnt++;
338 } 347 }
339 } else { 348 } else {
340 /* 349 /*
@@ -361,6 +370,13 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
361 ath_tx_update_baw(sc, tid, bf->bf_seqno); 370 ath_tx_update_baw(sc, tid, bf->bf_seqno);
362 spin_unlock_bh(&txq->axq_lock); 371 spin_unlock_bh(&txq->axq_lock);
363 372
373 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
374 ath_tx_rc_status(bf, ds, nbad, txok, true);
375 rc_update = false;
376 } else {
377 ath_tx_rc_status(bf, ds, nbad, txok, false);
378 }
379
364 ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar); 380 ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
365 } else { 381 } else {
366 /* retry the un-acked ones */ 382 /* retry the un-acked ones */
@@ -1734,7 +1750,7 @@ exit:
1734/*****************/ 1750/*****************/
1735 1751
1736static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 1752static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1737 struct ath_xmit_status *tx_status) 1753 int tx_flags)
1738{ 1754{
1739 struct ieee80211_hw *hw = sc->hw; 1755 struct ieee80211_hw *hw = sc->hw;
1740 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1756 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1755,18 +1771,14 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1755 tx_info->rate_driver_data[0] = NULL; 1771 tx_info->rate_driver_data[0] = NULL;
1756 } 1772 }
1757 1773
1758 if (tx_status->flags & ATH_TX_BAR) { 1774 if (tx_flags & ATH_TX_BAR)
1759 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 1775 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1760 tx_status->flags &= ~ATH_TX_BAR;
1761 }
1762 1776
1763 if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) { 1777 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
1764 /* Frame was ACKed */ 1778 /* Frame was ACKed */
1765 tx_info->flags |= IEEE80211_TX_STAT_ACK; 1779 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1766 } 1780 }
1767 1781
1768 tx_info->status.rates[0].count = tx_status->retries + 1;
1769
1770 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1782 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1771 padsize = hdrlen & 3; 1783 padsize = hdrlen & 3;
1772 if (padsize && hdrlen >= 24) { 1784 if (padsize && hdrlen >= 24) {
@@ -1789,29 +1801,22 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1789 int txok, int sendbar) 1801 int txok, int sendbar)
1790{ 1802{
1791 struct sk_buff *skb = bf->bf_mpdu; 1803 struct sk_buff *skb = bf->bf_mpdu;
1792 struct ath_xmit_status tx_status;
1793 unsigned long flags; 1804 unsigned long flags;
1805 int tx_flags = 0;
1794 1806
1795 /*
1796 * Set retry information.
1797 * NB: Don't use the information in the descriptor, because the frame
1798 * could be software retried.
1799 */
1800 tx_status.retries = bf->bf_retries;
1801 tx_status.flags = 0;
1802 1807
1803 if (sendbar) 1808 if (sendbar)
1804 tx_status.flags = ATH_TX_BAR; 1809 tx_flags = ATH_TX_BAR;
1805 1810
1806 if (!txok) { 1811 if (!txok) {
1807 tx_status.flags |= ATH_TX_ERROR; 1812 tx_flags |= ATH_TX_ERROR;
1808 1813
1809 if (bf_isxretried(bf)) 1814 if (bf_isxretried(bf))
1810 tx_status.flags |= ATH_TX_XRETRY; 1815 tx_flags |= ATH_TX_XRETRY;
1811 } 1816 }
1812 1817
1813 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE); 1818 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
1814 ath_tx_complete(sc, skb, &tx_status); 1819 ath_tx_complete(sc, skb, tx_flags);
1815 1820
1816 /* 1821 /*
1817 * Return the list of ath_buf of this mpdu to free queue 1822 * Return the list of ath_buf of this mpdu to free queue
@@ -1852,27 +1857,40 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
1852 return nbad; 1857 return nbad;
1853} 1858}
1854 1859
1855static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad) 1860static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
1861 int nbad, int txok, bool update_rc)
1856{ 1862{
1857 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; 1863 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
1858 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1864 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1859 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1865 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1860 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info); 1866 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
1867 struct ieee80211_hw *hw = tx_info_priv->aphy->hw;
1868 u8 i, tx_rateindex;
1869
1870 if (txok)
1871 tx_info->status.ack_signal = ds->ds_txstat.ts_rssi;
1861 1872
1862 tx_info_priv->update_rc = false; 1873 tx_rateindex = ds->ds_txstat.ts_rateindex;
1874 WARN_ON(tx_rateindex >= hw->max_rates);
1875
1876 tx_info_priv->update_rc = update_rc;
1863 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) 1877 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
1864 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 1878 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1865 1879
1866 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 && 1880 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
1867 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { 1881 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
1868 if (ieee80211_is_data(hdr->frame_control)) { 1882 if (ieee80211_is_data(hdr->frame_control)) {
1869 memcpy(&tx_info_priv->tx, &ds->ds_txstat, 1883 memcpy(&tx_info_priv->tx, &ds->ds_txstat,
1870 sizeof(tx_info_priv->tx)); 1884 sizeof(tx_info_priv->tx));
1871 tx_info_priv->n_frames = bf->bf_nframes; 1885 tx_info_priv->n_frames = bf->bf_nframes;
1872 tx_info_priv->n_bad_frames = nbad; 1886 tx_info_priv->n_bad_frames = nbad;
1873 tx_info_priv->update_rc = true;
1874 } 1887 }
1875 } 1888 }
1889
1890 for (i = tx_rateindex + 1; i < hw->max_rates; i++)
1891 tx_info->status.rates[i].count = 0;
1892
1893 tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
1876} 1894}
1877 1895
1878static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq) 1896static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
@@ -1897,7 +1915,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1897 struct ath_buf *bf, *lastbf, *bf_held = NULL; 1915 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1898 struct list_head bf_head; 1916 struct list_head bf_head;
1899 struct ath_desc *ds; 1917 struct ath_desc *ds;
1900 int txok, nbad = 0; 1918 int txok;
1901 int status; 1919 int status;
1902 1920
1903 DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", 1921 DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
@@ -1991,13 +2009,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1991 bf->bf_retries = ds->ds_txstat.ts_longretry; 2009 bf->bf_retries = ds->ds_txstat.ts_longretry;
1992 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) 2010 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
1993 bf->bf_state.bf_type |= BUF_XRETRY; 2011 bf->bf_state.bf_type |= BUF_XRETRY;
1994 nbad = 0; 2012 ath_tx_rc_status(bf, ds, 0, txok, true);
1995 } else {
1996 nbad = ath_tx_num_badfrms(sc, bf, txok);
1997 } 2013 }
1998 2014
1999 ath_tx_rc_status(bf, ds, nbad);
2000
2001 if (bf_isampdu(bf)) 2015 if (bf_isampdu(bf))
2002 ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok); 2016 ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok);
2003 else 2017 else
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index b72ef3fd315a..4896e0831114 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3993,6 +3993,8 @@ static void setup_struct_wldev_for_init(struct b43_wldev *dev)
3993 dev->irq_reason = 0; 3993 dev->irq_reason = 0;
3994 memset(dev->dma_reason, 0, sizeof(dev->dma_reason)); 3994 memset(dev->dma_reason, 0, sizeof(dev->dma_reason));
3995 dev->irq_savedstate = B43_IRQ_MASKTEMPLATE; 3995 dev->irq_savedstate = B43_IRQ_MASKTEMPLATE;
3996 if (b43_modparam_verbose < B43_VERBOSITY_DEBUG)
3997 dev->irq_savedstate &= ~B43_IRQ_PHY_TXERR;
3996 3998
3997 dev->mac_suspended = 1; 3999 dev->mac_suspended = 1;
3998 4000
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 0f53c7e5e01e..a63d88841df8 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -50,7 +50,7 @@ static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp)
50} 50}
51 51
52/* Extract the bitrate index out of an OFDM PLCP header. */ 52/* Extract the bitrate index out of an OFDM PLCP header. */
53static u8 b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy) 53static int b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy)
54{ 54{
55 int base = aphy ? 0 : 4; 55 int base = aphy ? 0 : 4;
56 56
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 205603d082aa..73f93a0ff2df 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -233,7 +233,7 @@ struct iwl3945_eeprom {
233#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */ 233#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
234 234
235#define TFD_QUEUE_MIN 0 235#define TFD_QUEUE_MIN 0
236#define TFD_QUEUE_MAX 6 236#define TFD_QUEUE_MAX 5 /* 4 DATA + 1 CMD */
237 237
238#define IWL_NUM_SCAN_RATES (2) 238#define IWL_NUM_SCAN_RATES (2)
239 239
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index f65c308a6714..af6b9d444778 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -124,7 +124,7 @@ static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
124#define IWL39_RATE_HIGH_TH 11520 124#define IWL39_RATE_HIGH_TH 11520
125#define IWL_SUCCESS_UP_TH 8960 125#define IWL_SUCCESS_UP_TH 8960
126#define IWL_SUCCESS_DOWN_TH 10880 126#define IWL_SUCCESS_DOWN_TH 10880
127#define IWL_RATE_MIN_FAILURE_TH 8 127#define IWL_RATE_MIN_FAILURE_TH 6
128#define IWL_RATE_MIN_SUCCESS_TH 8 128#define IWL_RATE_MIN_SUCCESS_TH 8
129#define IWL_RATE_DECREASE_TH 1920 129#define IWL_RATE_DECREASE_TH 1920
130#define IWL_RATE_RETRY_TH 15 130#define IWL_RATE_RETRY_TH 15
@@ -488,7 +488,7 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
488 488
489 IWL_DEBUG_RATE(priv, "enter\n"); 489 IWL_DEBUG_RATE(priv, "enter\n");
490 490
491 retries = info->status.rates[0].count - 1; 491 retries = info->status.rates[0].count;
492 /* Sanity Check for retries */ 492 /* Sanity Check for retries */
493 if (retries > IWL_RATE_RETRY_TH) 493 if (retries > IWL_RATE_RETRY_TH)
494 retries = IWL_RATE_RETRY_TH; 494 retries = IWL_RATE_RETRY_TH;
@@ -791,16 +791,15 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
791 if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) { 791 if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
792 IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n"); 792 IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
793 scale_action = -1; 793 scale_action = -1;
794
795 /* No throughput measured yet for adjacent rates, 794 /* No throughput measured yet for adjacent rates,
796 * try increase */ 795 * try increase */
797 } else if ((low_tpt == IWL_INVALID_VALUE) && 796 } else if ((low_tpt == IWL_INVALID_VALUE) &&
798 (high_tpt == IWL_INVALID_VALUE)) { 797 (high_tpt == IWL_INVALID_VALUE)) {
799 798
800 if (high != IWL_RATE_INVALID && window->success_counter >= IWL_RATE_INCREASE_TH) 799 if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
801 scale_action = 1; 800 scale_action = 1;
802 else if (low != IWL_RATE_INVALID) 801 else if (low != IWL_RATE_INVALID)
803 scale_action = -1; 802 scale_action = 0;
804 803
805 /* Both adjacent throughputs are measured, but neither one has 804 /* Both adjacent throughputs are measured, but neither one has
806 * better throughput; we're using the best rate, don't change 805 * better throughput; we're using the best rate, don't change
@@ -826,14 +825,14 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
826 else { 825 else {
827 IWL_DEBUG_RATE(priv, 826 IWL_DEBUG_RATE(priv,
828 "decrease rate because of high tpt\n"); 827 "decrease rate because of high tpt\n");
829 scale_action = -1; 828 scale_action = 0;
830 } 829 }
831 } else if (low_tpt != IWL_INVALID_VALUE) { 830 } else if (low_tpt != IWL_INVALID_VALUE) {
832 if (low_tpt > current_tpt) { 831 if (low_tpt > current_tpt) {
833 IWL_DEBUG_RATE(priv, 832 IWL_DEBUG_RATE(priv,
834 "decrease rate because of low tpt\n"); 833 "decrease rate because of low tpt\n");
835 scale_action = -1; 834 scale_action = -1;
836 } else if (window->success_counter >= IWL_RATE_INCREASE_TH) { 835 } else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
837 /* Lower rate has better 836 /* Lower rate has better
838 * throughput,decrease rate */ 837 * throughput,decrease rate */
839 scale_action = 1; 838 scale_action = 1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index ba7e720e73c1..2399328e8de7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -293,7 +293,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
293 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && 293 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
294 (txq_id != IWL_CMD_QUEUE_NUM) && 294 (txq_id != IWL_CMD_QUEUE_NUM) &&
295 priv->mac80211_registered) 295 priv->mac80211_registered)
296 ieee80211_wake_queue(priv->hw, txq_id); 296 iwl_wake_queue(priv, txq_id);
297} 297}
298 298
299/** 299/**
@@ -747,11 +747,6 @@ void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
747 int i; 747 int i;
748 int counter; 748 int counter;
749 749
750 /* classify bd */
751 if (txq->q.id == IWL_CMD_QUEUE_NUM)
752 /* nothing to cleanup after for host commands */
753 return;
754
755 /* sanity check */ 750 /* sanity check */
756 counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); 751 counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
757 if (counter > NUM_TFD_CHUNKS) { 752 if (counter > NUM_TFD_CHUNKS) {
@@ -1046,7 +1041,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
1046 goto error; 1041 goto error;
1047 1042
1048 /* Tx queue(s) */ 1043 /* Tx queue(s) */
1049 for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) { 1044 for (txq_id = 0; txq_id <= priv->hw_params.max_txq_num; txq_id++) {
1050 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? 1045 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
1051 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 1046 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
1052 rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, 1047 rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
@@ -1184,7 +1179,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
1184 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); 1179 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
1185 1180
1186 rc = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN); 1181 rc = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
1187 if(rc) 1182 if (rc)
1188 return rc; 1183 return rc;
1189 1184
1190 priv->cfg->ops->lib->apm_ops.config(priv); 1185 priv->cfg->ops->lib->apm_ops.config(priv);
@@ -1239,8 +1234,12 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
1239 int txq_id; 1234 int txq_id;
1240 1235
1241 /* Tx queues */ 1236 /* Tx queues */
1242 for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) 1237 for (txq_id = 0; txq_id <= priv->hw_params.max_txq_num; txq_id++)
1243 iwl_tx_queue_free(priv, txq_id); 1238 if (txq_id == IWL_CMD_QUEUE_NUM)
1239 iwl_cmd_queue_free(priv);
1240 else
1241 iwl_tx_queue_free(priv, txq_id);
1242
1244} 1243}
1245 1244
1246void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) 1245void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
@@ -1259,7 +1258,7 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
1259 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0); 1258 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0);
1260 1259
1261 /* reset TFD queues */ 1260 /* reset TFD queues */
1262 for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) { 1261 for (txq_id = 0; txq_id <= priv->hw_params.max_txq_num; txq_id++) {
1263 iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0); 1262 iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
1264 iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS, 1263 iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
1265 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id), 1264 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
@@ -2488,6 +2487,9 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2488 return -ENOMEM; 2487 return -ENOMEM;
2489 } 2488 }
2490 2489
2490 /* Assign number of Usable TX queues */
2491 priv->hw_params.max_txq_num = TFD_QUEUE_MAX;
2492
2491 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd); 2493 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
2492 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_3K; 2494 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_3K;
2493 priv->hw_params.max_pkt_size = 2342; 2495 priv->hw_params.max_pkt_size = 2342;
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index bd0140be774e..847a6220c5e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2178,10 +2178,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2178 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 2178 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
2179 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { 2179 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
2180 if (agg->state == IWL_AGG_OFF) 2180 if (agg->state == IWL_AGG_OFF)
2181 ieee80211_wake_queue(priv->hw, txq_id); 2181 iwl_wake_queue(priv, txq_id);
2182 else 2182 else
2183 ieee80211_wake_queue(priv->hw, 2183 iwl_wake_queue(priv, txq->swq_id);
2184 txq->swq_id);
2185 } 2184 }
2186 } 2185 }
2187 } else { 2186 } else {
@@ -2205,7 +2204,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2205 2204
2206 if (priv->mac80211_registered && 2205 if (priv->mac80211_registered &&
2207 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 2206 (iwl_queue_space(&txq->q) > txq->q.low_mark))
2208 ieee80211_wake_queue(priv->hw, txq_id); 2207 iwl_wake_queue(priv, txq_id);
2209 } 2208 }
2210 2209
2211 if (qc && likely(sta_id != IWL_INVALID_STATION)) 2210 if (qc && likely(sta_id != IWL_INVALID_STATION))
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 08c19bea71e3..e5ca2511a81a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1077,7 +1077,7 @@ static int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1077 1077
1078 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) || 1078 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
1079 (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) { 1079 (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) {
1080 IWL_WARN(priv, 1080 IWL_ERR(priv,
1081 "queue number out of range: %d, must be %d to %d\n", 1081 "queue number out of range: %d, must be %d to %d\n",
1082 txq_id, IWL50_FIRST_AMPDU_QUEUE, 1082 txq_id, IWL50_FIRST_AMPDU_QUEUE,
1083 IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1); 1083 IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1);
@@ -1295,10 +1295,9 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1295 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 1295 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1296 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { 1296 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
1297 if (agg->state == IWL_AGG_OFF) 1297 if (agg->state == IWL_AGG_OFF)
1298 ieee80211_wake_queue(priv->hw, txq_id); 1298 iwl_wake_queue(priv, txq_id);
1299 else 1299 else
1300 ieee80211_wake_queue(priv->hw, 1300 iwl_wake_queue(priv, txq->swq_id);
1301 txq->swq_id);
1302 } 1301 }
1303 } 1302 }
1304 } else { 1303 } else {
@@ -1324,7 +1323,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1324 1323
1325 if (priv->mac80211_registered && 1324 if (priv->mac80211_registered &&
1326 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 1325 (iwl_queue_space(&txq->q) > txq->q.low_mark))
1327 ieee80211_wake_queue(priv->hw, txq_id); 1326 iwl_wake_queue(priv, txq_id);
1328 } 1327 }
1329 1328
1330 if (ieee80211_is_data_qos(tx_resp->frame_ctrl)) 1329 if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 0db3bc011ac2..663dc83be501 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1567,9 +1567,8 @@ static void iwl_alive_start(struct iwl_priv *priv)
1567 if (iwl_is_associated(priv)) { 1567 if (iwl_is_associated(priv)) {
1568 struct iwl_rxon_cmd *active_rxon = 1568 struct iwl_rxon_cmd *active_rxon =
1569 (struct iwl_rxon_cmd *)&priv->active_rxon; 1569 (struct iwl_rxon_cmd *)&priv->active_rxon;
1570 1570 /* apply any changes in staging */
1571 memcpy(&priv->staging_rxon, &priv->active_rxon, 1571 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
1572 sizeof(priv->staging_rxon));
1573 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1572 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1574 } else { 1573 } else {
1575 /* Initialize our rx_config data */ 1574 /* Initialize our rx_config data */
@@ -2184,110 +2183,112 @@ static int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2184 struct iwl_priv *priv = hw->priv; 2183 struct iwl_priv *priv = hw->priv;
2185 const struct iwl_channel_info *ch_info; 2184 const struct iwl_channel_info *ch_info;
2186 struct ieee80211_conf *conf = &hw->conf; 2185 struct ieee80211_conf *conf = &hw->conf;
2187 unsigned long flags; 2186 unsigned long flags = 0;
2188 int ret = 0; 2187 int ret = 0;
2189 u16 channel; 2188 u16 ch;
2189 int scan_active = 0;
2190 2190
2191 mutex_lock(&priv->mutex); 2191 mutex_lock(&priv->mutex);
2192 IWL_DEBUG_MAC80211(priv, "enter to channel %d\n", conf->channel->hw_value); 2192 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
2193 conf->channel->hw_value, changed);
2193 2194
2194 priv->current_ht_config.is_ht = conf_is_ht(conf); 2195 if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
2195 2196 test_bit(STATUS_SCANNING, &priv->status))) {
2196 if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) { 2197 scan_active = 1;
2197 IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - waiting for uCode\n"); 2198 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
2198 goto out;
2199 } 2199 }
2200 2200
2201 if (!conf->radio_enabled)
2202 iwl_radio_kill_sw_disable_radio(priv);
2203 2201
2204 if (!iwl_is_ready(priv)) { 2202 /* during scanning mac80211 will delay channel setting until
2205 IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); 2203 * scan finish with changed = 0
2206 ret = -EIO; 2204 */
2207 goto out; 2205 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
2208 } 2206 if (scan_active)
2207 goto set_ch_out;
2208
2209 ch = ieee80211_frequency_to_channel(conf->channel->center_freq);
2210 ch_info = iwl_get_channel_info(priv, conf->channel->band, ch);
2211 if (!is_channel_valid(ch_info)) {
2212 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
2213 ret = -EINVAL;
2214 goto set_ch_out;
2215 }
2209 2216
2210 if (unlikely(!priv->cfg->mod_params->disable_hw_scan && 2217 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2211 test_bit(STATUS_SCANNING, &priv->status))) { 2218 !is_channel_ibss(ch_info)) {
2212 IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); 2219 IWL_ERR(priv, "channel %d in band %d not "
2213 mutex_unlock(&priv->mutex); 2220 "IBSS channel\n",
2214 return 0; 2221 conf->channel->hw_value, conf->channel->band);
2215 } 2222 ret = -EINVAL;
2223 goto set_ch_out;
2224 }
2216 2225
2217 channel = ieee80211_frequency_to_channel(conf->channel->center_freq); 2226 priv->current_ht_config.is_ht = conf_is_ht(conf);
2218 ch_info = iwl_get_channel_info(priv, conf->channel->band, channel);
2219 if (!is_channel_valid(ch_info)) {
2220 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
2221 ret = -EINVAL;
2222 goto out;
2223 }
2224 2227
2225 if (priv->iw_mode == NL80211_IFTYPE_ADHOC && 2228 spin_lock_irqsave(&priv->lock, flags);
2226 !is_channel_ibss(ch_info)) {
2227 IWL_ERR(priv, "channel %d in band %d not IBSS channel\n",
2228 conf->channel->hw_value, conf->channel->band);
2229 ret = -EINVAL;
2230 goto out;
2231 }
2232 2229
2233 spin_lock_irqsave(&priv->lock, flags);
2234 2230
2231 /* if we are switching from ht to 2.4 clear flags
2232 * from any ht related info since 2.4 does not
2233 * support ht */
2234 if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
2235 priv->staging_rxon.flags = 0;
2235 2236
2236 /* if we are switching from ht to 2.4 clear flags 2237 iwl_set_rxon_channel(priv, conf->channel);
2237 * from any ht related info since 2.4 does not
2238 * support ht */
2239 if ((le16_to_cpu(priv->staging_rxon.channel) != channel)
2240#ifdef IEEE80211_CONF_CHANNEL_SWITCH
2241 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH)
2242#endif
2243 )
2244 priv->staging_rxon.flags = 0;
2245 2238
2246 iwl_set_rxon_channel(priv, conf->channel); 2239 iwl_set_flags_for_band(priv, conf->channel->band);
2240 spin_unlock_irqrestore(&priv->lock, flags);
2241 set_ch_out:
2242 /* The list of supported rates and rate mask can be different
2243 * for each band; since the band may have changed, reset
2244 * the rate mask to what mac80211 lists */
2245 iwl_set_rate(priv);
2246 }
2247 2247
2248 iwl_set_flags_for_band(priv, conf->channel->band); 2248 if (changed & IEEE80211_CONF_CHANGE_PS) {
2249 if (conf->flags & IEEE80211_CONF_PS)
2250 ret = iwl_power_set_user_mode(priv, IWL_POWER_INDEX_3);
2251 else
2252 ret = iwl_power_set_user_mode(priv, IWL_POWER_MODE_CAM);
2253 if (ret)
2254 IWL_DEBUG_MAC80211(priv, "Error setting power level\n");
2249 2255
2250 /* The list of supported rates and rate mask can be different 2256 }
2251 * for each band; since the band may have changed, reset
2252 * the rate mask to what mac80211 lists */
2253 iwl_set_rate(priv);
2254 2257
2255 spin_unlock_irqrestore(&priv->lock, flags); 2258 if (changed & IEEE80211_CONF_CHANGE_POWER) {
2259 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2260 priv->tx_power_user_lmt, conf->power_level);
2256 2261
2257#ifdef IEEE80211_CONF_CHANNEL_SWITCH 2262 iwl_set_tx_power(priv, conf->power_level, false);
2258 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) { 2263 }
2259 iwl_hw_channel_switch(priv, conf->channel); 2264
2260 goto out; 2265 /* call to ensure that 4965 rx_chain is set properly in monitor mode */
2266 iwl_set_rxon_chain(priv);
2267
2268 if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) {
2269 if (conf->radio_enabled &&
2270 iwl_radio_kill_sw_enable_radio(priv)) {
2271 IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - "
2272 "waiting for uCode\n");
2273 goto out;
2274 }
2275
2276 if (!conf->radio_enabled)
2277 iwl_radio_kill_sw_disable_radio(priv);
2261 } 2278 }
2262#endif
2263 2279
2264 if (!conf->radio_enabled) { 2280 if (!conf->radio_enabled) {
2265 IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n"); 2281 IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n");
2266 goto out; 2282 goto out;
2267 } 2283 }
2268 2284
2269 if (iwl_is_rfkill(priv)) { 2285 if (!iwl_is_ready(priv)) {
2270 IWL_DEBUG_MAC80211(priv, "leave - RF kill\n"); 2286 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2271 ret = -EIO;
2272 goto out; 2287 goto out;
2273 } 2288 }
2274 2289
2275 if (conf->flags & IEEE80211_CONF_PS) 2290 if (scan_active)
2276 ret = iwl_power_set_user_mode(priv, IWL_POWER_INDEX_3); 2291 goto out;
2277 else
2278 ret = iwl_power_set_user_mode(priv, IWL_POWER_MODE_CAM);
2279 if (ret)
2280 IWL_DEBUG_MAC80211(priv, "Error setting power level\n");
2281
2282 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2283 priv->tx_power_user_lmt, conf->power_level);
2284
2285 iwl_set_tx_power(priv, conf->power_level, false);
2286
2287 iwl_set_rate(priv);
2288
2289 /* call to ensure that 4965 rx_chain is set properly in monitor mode */
2290 iwl_set_rxon_chain(priv);
2291 2292
2292 if (memcmp(&priv->active_rxon, 2293 if (memcmp(&priv->active_rxon,
2293 &priv->staging_rxon, sizeof(priv->staging_rxon))) 2294 &priv->staging_rxon, sizeof(priv->staging_rxon)))
@@ -2295,9 +2296,9 @@ static int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2295 else 2296 else
2296 IWL_DEBUG_INFO(priv, "No re-sending same RXON configuration.\n"); 2297 IWL_DEBUG_INFO(priv, "No re-sending same RXON configuration.\n");
2297 2298
2298 IWL_DEBUG_MAC80211(priv, "leave\n");
2299 2299
2300out: 2300out:
2301 IWL_DEBUG_MAC80211(priv, "leave\n");
2301 mutex_unlock(&priv->mutex); 2302 mutex_unlock(&priv->mutex);
2302 return ret; 2303 return ret;
2303} 2304}
@@ -2682,6 +2683,7 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
2682 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 2683 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2683{ 2684{
2684 struct iwl_priv *priv = hw->priv; 2685 struct iwl_priv *priv = hw->priv;
2686 int ret;
2685 2687
2686 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", 2688 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
2687 sta->addr, tid); 2689 sta->addr, tid);
@@ -2695,13 +2697,21 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
2695 return iwl_sta_rx_agg_start(priv, sta->addr, tid, *ssn); 2697 return iwl_sta_rx_agg_start(priv, sta->addr, tid, *ssn);
2696 case IEEE80211_AMPDU_RX_STOP: 2698 case IEEE80211_AMPDU_RX_STOP:
2697 IWL_DEBUG_HT(priv, "stop Rx\n"); 2699 IWL_DEBUG_HT(priv, "stop Rx\n");
2698 return iwl_sta_rx_agg_stop(priv, sta->addr, tid); 2700 ret = iwl_sta_rx_agg_stop(priv, sta->addr, tid);
2701 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2702 return 0;
2703 else
2704 return ret;
2699 case IEEE80211_AMPDU_TX_START: 2705 case IEEE80211_AMPDU_TX_START:
2700 IWL_DEBUG_HT(priv, "start Tx\n"); 2706 IWL_DEBUG_HT(priv, "start Tx\n");
2701 return iwl_tx_agg_start(priv, sta->addr, tid, ssn); 2707 return iwl_tx_agg_start(priv, sta->addr, tid, ssn);
2702 case IEEE80211_AMPDU_TX_STOP: 2708 case IEEE80211_AMPDU_TX_STOP:
2703 IWL_DEBUG_HT(priv, "stop Tx\n"); 2709 IWL_DEBUG_HT(priv, "stop Tx\n");
2704 return iwl_tx_agg_stop(priv, sta->addr, tid); 2710 ret = iwl_tx_agg_stop(priv, sta->addr, tid);
2711 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2712 return 0;
2713 else
2714 return ret;
2705 default: 2715 default:
2706 IWL_DEBUG_HT(priv, "unknown\n"); 2716 IWL_DEBUG_HT(priv, "unknown\n");
2707 return -EINVAL; 2717 return -EINVAL;
@@ -3083,11 +3093,6 @@ static ssize_t store_power_level(struct device *d,
3083 3093
3084 mutex_lock(&priv->mutex); 3094 mutex_lock(&priv->mutex);
3085 3095
3086 if (!iwl_is_ready(priv)) {
3087 ret = -EAGAIN;
3088 goto out;
3089 }
3090
3091 ret = strict_strtoul(buf, 10, &mode); 3096 ret = strict_strtoul(buf, 10, &mode);
3092 if (ret) 3097 if (ret)
3093 goto out; 3098 goto out;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 085e9cf1cac9..c54fb93e9d72 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1298,6 +1298,7 @@ int iwl_setup_mac(struct iwl_priv *priv)
1298 hw->flags = IEEE80211_HW_SIGNAL_DBM | 1298 hw->flags = IEEE80211_HW_SIGNAL_DBM |
1299 IEEE80211_HW_NOISE_DBM | 1299 IEEE80211_HW_NOISE_DBM |
1300 IEEE80211_HW_AMPDU_AGGREGATION | 1300 IEEE80211_HW_AMPDU_AGGREGATION |
1301 IEEE80211_HW_SPECTRUM_MGMT |
1301 IEEE80211_HW_SUPPORTS_PS; 1302 IEEE80211_HW_SUPPORTS_PS;
1302 hw->wiphy->interface_modes = 1303 hw->wiphy->interface_modes =
1303 BIT(NL80211_IFTYPE_STATION) | 1304 BIT(NL80211_IFTYPE_STATION) |
@@ -1308,9 +1309,6 @@ int iwl_setup_mac(struct iwl_priv *priv)
1308 1309
1309 /* Default value; 4 EDCA QOS priorities */ 1310 /* Default value; 4 EDCA QOS priorities */
1310 hw->queues = 4; 1311 hw->queues = 4;
1311 /* queues to support 11n aggregation */
1312 if (priv->cfg->sku & IWL_SKU_N)
1313 hw->ampdu_queues = priv->cfg->mod_params->num_of_ampdu_queues;
1314 1312
1315 hw->conf.beacon_int = 100; 1313 hw->conf.beacon_int = 100;
1316 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; 1314 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
@@ -1437,6 +1435,10 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1437 1435
1438 priv->tx_power_user_lmt = tx_power; 1436 priv->tx_power_user_lmt = tx_power;
1439 1437
1438 /* if nic is not up don't send command */
1439 if (!iwl_is_ready_rf(priv))
1440 return ret;
1441
1440 if (force && priv->cfg->ops->lib->send_tx_power) 1442 if (force && priv->cfg->ops->lib->send_tx_power)
1441 ret = priv->cfg->ops->lib->send_tx_power(priv); 1443 ret = priv->cfg->ops->lib->send_tx_power(priv);
1442 1444
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 27310fec2e43..a8eac8c3c1fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -264,6 +264,7 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
264* RX 264* RX
265******************************************************/ 265******************************************************/
266void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 266void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
267void iwl_cmd_queue_free(struct iwl_priv *priv);
267int iwl_rx_queue_alloc(struct iwl_priv *priv); 268int iwl_rx_queue_alloc(struct iwl_priv *priv);
268void iwl_rx_handle(struct iwl_priv *priv); 269void iwl_rx_handle(struct iwl_priv *priv);
269int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, 270int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 36cfeccfafbc..64eb585f1578 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -425,6 +425,56 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
425 return ret; 425 return ret;
426} 426}
427 427
428static ssize_t iwl_dbgfs_status_read(struct file *file,
429 char __user *user_buf,
430 size_t count, loff_t *ppos) {
431
432 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
433 char buf[512];
434 int pos = 0;
435 const size_t bufsz = sizeof(buf);
436
437 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
438 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
439 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
440 test_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status));
441 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
442 test_bit(STATUS_INT_ENABLED, &priv->status));
443 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
444 test_bit(STATUS_RF_KILL_HW, &priv->status));
445 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_SW:\t %d\n",
446 test_bit(STATUS_RF_KILL_SW, &priv->status));
447 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
448 test_bit(STATUS_INIT, &priv->status));
449 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
450 test_bit(STATUS_ALIVE, &priv->status));
451 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
452 test_bit(STATUS_READY, &priv->status));
453 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
454 test_bit(STATUS_TEMPERATURE, &priv->status));
455 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
456 test_bit(STATUS_GEO_CONFIGURED, &priv->status));
457 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
458 test_bit(STATUS_EXIT_PENDING, &priv->status));
459 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_IN_SUSPEND:\t %d\n",
460 test_bit(STATUS_IN_SUSPEND, &priv->status));
461 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
462 test_bit(STATUS_STATISTICS, &priv->status));
463 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
464 test_bit(STATUS_SCANNING, &priv->status));
465 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
466 test_bit(STATUS_SCAN_ABORTING, &priv->status));
467 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
468 test_bit(STATUS_SCAN_HW, &priv->status));
469 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
470 test_bit(STATUS_POWER_PMI, &priv->status));
471 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
472 test_bit(STATUS_FW_ERROR, &priv->status));
473 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_MODE_PENDING:\t %d\n",
474 test_bit(STATUS_MODE_PENDING, &priv->status));
475 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
476}
477
428DEBUGFS_READ_WRITE_FILE_OPS(sram); 478DEBUGFS_READ_WRITE_FILE_OPS(sram);
429DEBUGFS_WRITE_FILE_OPS(log_event); 479DEBUGFS_WRITE_FILE_OPS(log_event);
430DEBUGFS_READ_FILE_OPS(eeprom); 480DEBUGFS_READ_FILE_OPS(eeprom);
@@ -432,6 +482,7 @@ DEBUGFS_READ_FILE_OPS(stations);
432DEBUGFS_READ_FILE_OPS(rx_statistics); 482DEBUGFS_READ_FILE_OPS(rx_statistics);
433DEBUGFS_READ_FILE_OPS(tx_statistics); 483DEBUGFS_READ_FILE_OPS(tx_statistics);
434DEBUGFS_READ_FILE_OPS(channels); 484DEBUGFS_READ_FILE_OPS(channels);
485DEBUGFS_READ_FILE_OPS(status);
435 486
436/* 487/*
437 * Create the debugfs files and directories 488 * Create the debugfs files and directories
@@ -466,7 +517,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
466 DEBUGFS_ADD_FILE(rx_statistics, data); 517 DEBUGFS_ADD_FILE(rx_statistics, data);
467 DEBUGFS_ADD_FILE(tx_statistics, data); 518 DEBUGFS_ADD_FILE(tx_statistics, data);
468 DEBUGFS_ADD_FILE(channels, data); 519 DEBUGFS_ADD_FILE(channels, data);
469 DEBUGFS_ADD_X32(status, data, (u32 *)&priv->status); 520 DEBUGFS_ADD_FILE(status, data);
470 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal); 521 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
471 DEBUGFS_ADD_BOOL(disable_chain_noise, rf, 522 DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
472 &priv->disable_chain_noise_cal); 523 &priv->disable_chain_noise_cal);
@@ -496,6 +547,7 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
496 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_log_event); 547 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_log_event);
497 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations); 548 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations);
498 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_channels); 549 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_channels);
550 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_status);
499 DEBUGFS_REMOVE(priv->dbgfs->dir_data); 551 DEBUGFS_REMOVE(priv->dbgfs->dir_data);
500 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity); 552 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity);
501 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise); 553 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 0baae8022824..ec9a13846edd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -996,6 +996,12 @@ struct iwl_priv {
996 u8 key_mapping_key; 996 u8 key_mapping_key;
997 unsigned long ucode_key_table; 997 unsigned long ucode_key_table;
998 998
999 /* queue refcounts */
1000#define IWL_MAX_HW_QUEUES 32
1001 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
1002 /* for each AC */
1003 atomic_t queue_stop_count[4];
1004
999 /* Indication if ieee80211_ops->open has been called */ 1005 /* Indication if ieee80211_ops->open has been called */
1000 u8 is_open; 1006 u8 is_open;
1001 1007
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index fb64d297dd4e..a1328c3c81ae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -93,4 +93,56 @@ static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
93 return (desc->v_addr != NULL) ? 0 : -ENOMEM; 93 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
94} 94}
95 95
96/*
97 * we have 8 bits used like this:
98 *
99 * 7 6 5 4 3 2 1 0
100 * | | | | | | | |
101 * | | | | | | +-+-------- AC queue (0-3)
102 * | | | | | |
103 * | +-+-+-+-+------------ HW A-MPDU queue
104 * |
105 * +---------------------- indicates agg queue
106 */
107static inline u8 iwl_virtual_agg_queue_num(u8 ac, u8 hwq)
108{
109 BUG_ON(ac > 3); /* only have 2 bits */
110 BUG_ON(hwq > 31); /* only have 5 bits */
111
112 return 0x80 | (hwq << 2) | ac;
113}
114
115static inline void iwl_wake_queue(struct iwl_priv *priv, u8 queue)
116{
117 u8 ac = queue;
118 u8 hwq = queue;
119
120 if (queue & 0x80) {
121 ac = queue & 3;
122 hwq = (queue >> 2) & 0x1f;
123 }
124
125 if (test_and_clear_bit(hwq, priv->queue_stopped))
126 if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
127 ieee80211_wake_queue(priv->hw, ac);
128}
129
130static inline void iwl_stop_queue(struct iwl_priv *priv, u8 queue)
131{
132 u8 ac = queue;
133 u8 hwq = queue;
134
135 if (queue & 0x80) {
136 ac = queue & 3;
137 hwq = (queue >> 2) & 0x1f;
138 }
139
140 if (!test_and_set_bit(hwq, priv->queue_stopped))
141 if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
142 ieee80211_stop_queue(priv->hw, ac);
143}
144
145#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
146#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
147
96#endif /* __iwl_helpers_h__ */ 148#endif /* __iwl_helpers_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 18b7e4195ea1..47c894530eb5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -273,7 +273,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
273 if (priv->iw_mode != NL80211_IFTYPE_STATION) 273 if (priv->iw_mode != NL80211_IFTYPE_STATION)
274 final_mode = IWL_POWER_MODE_CAM; 274 final_mode = IWL_POWER_MODE_CAM;
275 275
276 if (!iwl_is_rfkill(priv) && !setting->power_disabled && 276 if (iwl_is_ready_rf(priv) && !setting->power_disabled &&
277 ((setting->power_mode != final_mode) || force)) { 277 ((setting->power_mode != final_mode) || force)) {
278 struct iwl_powertable_cmd cmd; 278 struct iwl_powertable_cmd cmd;
279 279
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 1684490d93c0..5798fe49c771 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -1138,8 +1138,10 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid)
1138 int sta_id; 1138 int sta_id;
1139 1139
1140 sta_id = iwl_find_station(priv, addr); 1140 sta_id = iwl_find_station(priv, addr);
1141 if (sta_id == IWL_INVALID_STATION) 1141 if (sta_id == IWL_INVALID_STATION) {
1142 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
1142 return -ENXIO; 1143 return -ENXIO;
1144 }
1143 1145
1144 spin_lock_irqsave(&priv->sta_lock, flags); 1146 spin_lock_irqsave(&priv->sta_lock, flags);
1145 priv->stations[sta_id].sta.station_flags_msk = 0; 1147 priv->stations[sta_id].sta.station_flags_msk = 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index dff60fb70214..1f117a49c569 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -174,7 +174,7 @@ EXPORT_SYMBOL(iwl_tx_queue_free);
174 * Free all buffers. 174 * Free all buffers.
175 * 0-fill, but do not free "txq" descriptor structure. 175 * 0-fill, but do not free "txq" descriptor structure.
176 */ 176 */
177static void iwl_cmd_queue_free(struct iwl_priv *priv) 177void iwl_cmd_queue_free(struct iwl_priv *priv)
178{ 178{
179 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; 179 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
180 struct iwl_queue *q = &txq->q; 180 struct iwl_queue *q = &txq->q;
@@ -193,12 +193,14 @@ static void iwl_cmd_queue_free(struct iwl_priv *priv)
193 193
194 /* De-alloc circular buffer of TFDs */ 194 /* De-alloc circular buffer of TFDs */
195 if (txq->q.n_bd) 195 if (txq->q.n_bd)
196 pci_free_consistent(dev, sizeof(struct iwl_tfd) * 196 pci_free_consistent(dev, priv->hw_params.tfd_size *
197 txq->q.n_bd, txq->tfds, txq->q.dma_addr); 197 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
198 198
199 /* 0-fill queue descriptor structure */ 199 /* 0-fill queue descriptor structure */
200 memset(txq, 0, sizeof(*txq)); 200 memset(txq, 0, sizeof(*txq));
201} 201}
202EXPORT_SYMBOL(iwl_cmd_queue_free);
203
202/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 204/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
203 * DMA services 205 * DMA services
204 * 206 *
@@ -761,8 +763,10 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
761 hdr->seq_ctrl |= cpu_to_le16(seq_number); 763 hdr->seq_ctrl |= cpu_to_le16(seq_number);
762 seq_number += 0x10; 764 seq_number += 0x10;
763 /* aggregation is on for this <sta,tid> */ 765 /* aggregation is on for this <sta,tid> */
764 if (info->flags & IEEE80211_TX_CTL_AMPDU) 766 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
765 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; 767 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
768 swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id);
769 }
766 priv->stations[sta_id].tid[tid].tfds_in_queue++; 770 priv->stations[sta_id].tid[tid].tfds_in_queue++;
767 } 771 }
768 772
@@ -893,7 +897,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
893 iwl_txq_update_write_ptr(priv, txq); 897 iwl_txq_update_write_ptr(priv, txq);
894 spin_unlock_irqrestore(&priv->lock, flags); 898 spin_unlock_irqrestore(&priv->lock, flags);
895 } else { 899 } else {
896 ieee80211_stop_queue(priv->hw, txq->swq_id); 900 iwl_stop_queue(priv, txq->swq_id);
897 } 901 }
898 } 902 }
899 903
@@ -1221,8 +1225,10 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1221 1225
1222 sta_id = iwl_find_station(priv, ra); 1226 sta_id = iwl_find_station(priv, ra);
1223 1227
1224 if (sta_id == IWL_INVALID_STATION) 1228 if (sta_id == IWL_INVALID_STATION) {
1229 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
1225 return -ENXIO; 1230 return -ENXIO;
1231 }
1226 1232
1227 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON) 1233 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
1228 IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n"); 1234 IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");
@@ -1429,7 +1435,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1429 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && 1435 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1430 priv->mac80211_registered && 1436 priv->mac80211_registered &&
1431 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) 1437 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1432 ieee80211_wake_queue(priv->hw, txq->swq_id); 1438 iwl_wake_queue(priv, txq->swq_id);
1433 1439
1434 iwl_txq_check_empty(priv, sta_id, tid, scd_flow); 1440 iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
1435 } 1441 }
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 4465320f2735..a71b08ca7c71 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -485,14 +485,14 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
485 memcpy(priv->stations_39[sta_id].sta.key.key, keyconf->key, 485 memcpy(priv->stations_39[sta_id].sta.key.key, keyconf->key,
486 keyconf->keylen); 486 keyconf->keylen);
487 487
488 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) 488 if ((priv->stations_39[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
489 == STA_KEY_FLG_NO_ENC) 489 == STA_KEY_FLG_NO_ENC)
490 priv->stations[sta_id].sta.key.key_offset = 490 priv->stations_39[sta_id].sta.key.key_offset =
491 iwl_get_free_ucode_key_index(priv); 491 iwl_get_free_ucode_key_index(priv);
492 /* else, we are overriding an existing key => no need to allocated room 492 /* else, we are overriding an existing key => no need to allocated room
493 * in uCode. */ 493 * in uCode. */
494 494
495 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, 495 WARN(priv->stations_39[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
496 "no space for a new key"); 496 "no space for a new key");
497 497
498 priv->stations_39[sta_id].sta.key.key_flags = key_flags; 498 priv->stations_39[sta_id].sta.key.key_flags = key_flags;
@@ -560,7 +560,7 @@ static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
560 ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id); 560 ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
561 break; 561 break;
562 default: 562 default:
563 IWL_ERR(priv,"Unknown alg: %s alg = %d\n", __func__, keyconf->alg); 563 IWL_ERR(priv, "Unknown alg: %s alg = %d\n", __func__, keyconf->alg);
564 ret = -EINVAL; 564 ret = -EINVAL;
565 } 565 }
566 566
@@ -1168,7 +1168,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
1168 spin_unlock_irqrestore(&priv->lock, flags); 1168 spin_unlock_irqrestore(&priv->lock, flags);
1169 } 1169 }
1170 1170
1171 ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb)); 1171 iwl_stop_queue(priv, skb_get_queue_mapping(skb));
1172 } 1172 }
1173 1173
1174 return 0; 1174 return 0;
@@ -3773,15 +3773,19 @@ static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed)
3773 } 3773 }
3774#endif 3774#endif
3775 3775
3776 if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) { 3776 if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) {
3777 IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - waiting for uCode\n"); 3777 if (conf->radio_enabled &&
3778 goto out; 3778 iwl_radio_kill_sw_enable_radio(priv)) {
3779 } 3779 IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - "
3780 "waiting for uCode\n");
3781 goto out;
3782 }
3780 3783
3781 if (!conf->radio_enabled) { 3784 if (!conf->radio_enabled) {
3782 iwl_radio_kill_sw_disable_radio(priv); 3785 iwl_radio_kill_sw_disable_radio(priv);
3783 IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n"); 3786 IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n");
3784 goto out; 3787 goto out;
3788 }
3785 } 3789 }
3786 3790
3787 if (iwl_is_rfkill(priv)) { 3791 if (iwl_is_rfkill(priv)) {
@@ -4546,11 +4550,6 @@ static ssize_t store_power_level(struct device *d,
4546 4550
4547 mutex_lock(&priv->mutex); 4551 mutex_lock(&priv->mutex);
4548 4552
4549 if (!iwl_is_ready(priv)) {
4550 ret = -EAGAIN;
4551 goto out;
4552 }
4553
4554 ret = strict_strtoul(buf, 10, &mode); 4553 ret = strict_strtoul(buf, 10, &mode);
4555 if (ret) 4554 if (ret)
4556 goto out; 4555 goto out;
@@ -4905,7 +4904,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
4905 4904
4906 /* Tell mac80211 our characteristics */ 4905 /* Tell mac80211 our characteristics */
4907 hw->flags = IEEE80211_HW_SIGNAL_DBM | 4906 hw->flags = IEEE80211_HW_SIGNAL_DBM |
4908 IEEE80211_HW_NOISE_DBM; 4907 IEEE80211_HW_NOISE_DBM |
4908 IEEE80211_HW_SPECTRUM_MGMT;
4909 4909
4910 hw->wiphy->interface_modes = 4910 hw->wiphy->interface_modes =
4911 BIT(NL80211_IFTYPE_STATION) | 4911 BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/libertas/radiotap.h b/drivers/net/wireless/libertas/radiotap.h
index f8eb9097ff0a..d16b26416e82 100644
--- a/drivers/net/wireless/libertas/radiotap.h
+++ b/drivers/net/wireless/libertas/radiotap.h
@@ -33,22 +33,12 @@ struct rx_radiotap_hdr {
33 struct ieee80211_radiotap_header hdr; 33 struct ieee80211_radiotap_header hdr;
34 u8 flags; 34 u8 flags;
35 u8 rate; 35 u8 rate;
36 u16 chan_freq;
37 u16 chan_flags;
38 u8 antenna;
39 u8 antsignal; 36 u8 antsignal;
40 u16 rx_flags;
41#if 0
42 u8 pad[IEEE80211_RADIOTAP_HDRLEN - 18];
43#endif
44} __attribute__ ((packed)); 37} __attribute__ ((packed));
45 38
46#define RX_RADIOTAP_PRESENT ( \ 39#define RX_RADIOTAP_PRESENT ( \
47 (1 << IEEE80211_RADIOTAP_FLAGS) | \ 40 (1 << IEEE80211_RADIOTAP_FLAGS) | \
48 (1 << IEEE80211_RADIOTAP_RATE) | \ 41 (1 << IEEE80211_RADIOTAP_RATE) | \
49 (1 << IEEE80211_RADIOTAP_CHANNEL) | \
50 (1 << IEEE80211_RADIOTAP_ANTENNA) | \
51 (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) |\ 42 (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) |\
52 (1 << IEEE80211_RADIOTAP_RX_FLAGS) | \
53 0) 43 0)
54 44
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 4f60948dde9c..63d7e19ce9bd 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -351,19 +351,11 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
351 radiotap_hdr.hdr.it_pad = 0; 351 radiotap_hdr.hdr.it_pad = 0;
352 radiotap_hdr.hdr.it_len = cpu_to_le16 (sizeof(struct rx_radiotap_hdr)); 352 radiotap_hdr.hdr.it_len = cpu_to_le16 (sizeof(struct rx_radiotap_hdr));
353 radiotap_hdr.hdr.it_present = cpu_to_le32 (RX_RADIOTAP_PRESENT); 353 radiotap_hdr.hdr.it_present = cpu_to_le32 (RX_RADIOTAP_PRESENT);
354 /* unknown values */ 354 if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
355 radiotap_hdr.flags = 0; 355 radiotap_hdr.flags |= IEEE80211_RADIOTAP_F_BADFCS;
356 radiotap_hdr.chan_freq = 0;
357 radiotap_hdr.chan_flags = 0;
358 radiotap_hdr.antenna = 0;
359 /* known values */
360 radiotap_hdr.rate = convert_mv_rate_to_radiotap(prxpd->rx_rate); 356 radiotap_hdr.rate = convert_mv_rate_to_radiotap(prxpd->rx_rate);
361 /* XXX must check no carryout */ 357 /* XXX must check no carryout */
362 radiotap_hdr.antsignal = prxpd->snr + prxpd->nf; 358 radiotap_hdr.antsignal = prxpd->snr + prxpd->nf;
363 radiotap_hdr.rx_flags = 0;
364 if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
365 radiotap_hdr.rx_flags |= IEEE80211_RADIOTAP_F_RX_BADFCS;
366 //memset(radiotap_hdr.pad, 0x11, IEEE80211_RADIOTAP_HDRLEN - 18);
367 359
368 /* chop the rxpd */ 360 /* chop the rxpd */
369 skb_pull(skb, sizeof(struct rxpd)); 361 skb_pull(skb, sizeof(struct rxpd));
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 2368b7f825a2..d4fdc8b7d7d8 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -933,7 +933,6 @@ static int __init init_mac80211_hwsim(void)
933 BIT(NL80211_IFTYPE_STATION) | 933 BIT(NL80211_IFTYPE_STATION) |
934 BIT(NL80211_IFTYPE_AP) | 934 BIT(NL80211_IFTYPE_AP) |
935 BIT(NL80211_IFTYPE_MESH_POINT); 935 BIT(NL80211_IFTYPE_MESH_POINT);
936 hw->ampdu_queues = 1;
937 936
938 hw->flags = IEEE80211_HW_MFP_CAPABLE; 937 hw->flags = IEEE80211_HW_MFP_CAPABLE;
939 938
@@ -1041,6 +1040,9 @@ static int __init init_mac80211_hwsim(void)
1041 break; 1040 break;
1042 } 1041 }
1043 1042
1043 /* give the regulatory workqueue a chance to run */
1044 if (regtest)
1045 schedule_timeout_interruptible(1);
1044 err = ieee80211_register_hw(hw); 1046 err = ieee80211_register_hw(hw);
1045 if (err < 0) { 1047 if (err < 0) {
1046 printk(KERN_DEBUG "mac80211_hwsim: " 1048 printk(KERN_DEBUG "mac80211_hwsim: "
diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig
index cfc5f41aa136..b45d6a4ed1e8 100644
--- a/drivers/net/wireless/p54/Kconfig
+++ b/drivers/net/wireless/p54/Kconfig
@@ -1,9 +1,10 @@
1config P54_COMMON 1config P54_COMMON
2 tristate "Softmac Prism54 support" 2 tristate "Softmac Prism54 support"
3 depends on MAC80211 && WLAN_80211 && FW_LOADER && EXPERIMENTAL 3 depends on MAC80211 && WLAN_80211 && EXPERIMENTAL
4 select FW_LOADER
4 ---help--- 5 ---help---
5 This is common code for isl38xx based cards. 6 This is common code for isl38xx/stlc45xx based modules.
6 This module does nothing by itself - the USB/PCI frontends 7 This module does nothing by itself - the USB/PCI/SPI front-ends
7 also need to be enabled in order to support any devices. 8 also need to be enabled in order to support any devices.
8 9
9 These devices require softmac firmware which can be found at 10 These devices require softmac firmware which can be found at
@@ -17,31 +18,6 @@ config P54_USB
17 select CRC32 18 select CRC32
18 ---help--- 19 ---help---
19 This driver is for USB isl38xx based wireless cards. 20 This driver is for USB isl38xx based wireless cards.
20 These are USB based adapters found in devices such as:
21
22 3COM 3CRWE254G72
23 SMC 2862W-G
24 Accton 802.11g WN4501 USB
25 Siemens Gigaset USB
26 Netgear WG121
27 Netgear WG111
28 Medion 40900, Roper Europe
29 Shuttle PN15, Airvast WM168g, IOGear GWU513
30 Linksys WUSB54G
31 Linksys WUSB54G Portable
32 DLink DWL-G120 Spinnaker
33 DLink DWL-G122
34 Belkin F5D7050 ver 1000
35 Cohiba Proto board
36 SMC 2862W-G version 2
37 U.S. Robotics U5 802.11g Adapter
38 FUJITSU E-5400 USB D1700
39 Sagem XG703A
40 DLink DWL-G120 Cohiba
41 Spinnaker Proto board
42 Linksys WUSB54AG
43 Inventel UR054G
44 Spinnaker DUT
45 21
46 These devices require softmac firmware which can be found at 22 These devices require softmac firmware which can be found at
47 http://prism54.org/ 23 http://prism54.org/
@@ -64,10 +40,15 @@ config P54_PCI
64 40
65config P54_SPI 41config P54_SPI
66 tristate "Prism54 SPI (stlc45xx) support" 42 tristate "Prism54 SPI (stlc45xx) support"
67 depends on P54_COMMON && SPI_MASTER 43 depends on P54_COMMON && SPI_MASTER && GENERIC_HARDIRQS
68 ---help--- 44 ---help---
69 This driver is for stlc4550 or stlc4560 based wireless chips. 45 This driver is for stlc4550 or stlc4560 based wireless chips.
70 This driver is experimental, untested and will probably only work on 46 This driver is experimental, untested and will probably only work on
71 Nokia's N800/N810 Portable Internet Tablet. 47 Nokia's N800/N810 Portable Internet Tablet.
72 48
73 If you choose to build a module, it'll be called p54spi. 49 If you choose to build a module, it'll be called p54spi.
50
51config P54_LEDS
52 bool
53 depends on P54_COMMON && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = P54_COMMON)
54 default y
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 0a989834b70d..0c1b0577d4ee 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -21,9 +21,9 @@
21#include <linux/etherdevice.h> 21#include <linux/etherdevice.h>
22 22
23#include <net/mac80211.h> 23#include <net/mac80211.h>
24#ifdef CONFIG_MAC80211_LEDS 24#ifdef CONFIG_P54_LEDS
25#include <linux/leds.h> 25#include <linux/leds.h>
26#endif /* CONFIG_MAC80211_LEDS */ 26#endif /* CONFIG_P54_LEDS */
27 27
28#include "p54.h" 28#include "p54.h"
29#include "p54common.h" 29#include "p54common.h"
@@ -2420,7 +2420,7 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
2420 return 0; 2420 return 0;
2421} 2421}
2422 2422
2423#ifdef CONFIG_MAC80211_LEDS 2423#ifdef CONFIG_P54_LEDS
2424static void p54_led_brightness_set(struct led_classdev *led_dev, 2424static void p54_led_brightness_set(struct led_classdev *led_dev,
2425 enum led_brightness brightness) 2425 enum led_brightness brightness)
2426{ 2426{
@@ -2508,7 +2508,7 @@ static void p54_unregister_leds(struct ieee80211_hw *dev)
2508 if (priv->assoc_led.registered) 2508 if (priv->assoc_led.registered)
2509 led_classdev_unregister(&priv->assoc_led.led_dev); 2509 led_classdev_unregister(&priv->assoc_led.led_dev);
2510} 2510}
2511#endif /* CONFIG_MAC80211_LEDS */ 2511#endif /* CONFIG_P54_LEDS */
2512 2512
2513static const struct ieee80211_ops p54_ops = { 2513static const struct ieee80211_ops p54_ops = {
2514 .tx = p54_tx, 2514 .tx = p54_tx,
@@ -2592,11 +2592,11 @@ int p54_register_common(struct ieee80211_hw *dev, struct device *pdev)
2592 return err; 2592 return err;
2593 } 2593 }
2594 2594
2595 #ifdef CONFIG_MAC80211_LEDS 2595#ifdef CONFIG_P54_LEDS
2596 err = p54_init_leds(dev); 2596 err = p54_init_leds(dev);
2597 if (err) 2597 if (err)
2598 return err; 2598 return err;
2599 #endif /* CONFIG_MAC80211_LEDS */ 2599#endif /* CONFIG_P54_LEDS */
2600 2600
2601 dev_info(pdev, "is registered as '%s'\n", wiphy_name(dev->wiphy)); 2601 dev_info(pdev, "is registered as '%s'\n", wiphy_name(dev->wiphy));
2602 return 0; 2602 return 0;
@@ -2610,9 +2610,9 @@ void p54_free_common(struct ieee80211_hw *dev)
2610 kfree(priv->output_limit); 2610 kfree(priv->output_limit);
2611 kfree(priv->curve_data); 2611 kfree(priv->curve_data);
2612 2612
2613 #ifdef CONFIG_MAC80211_LEDS 2613#ifdef CONFIG_P54_LEDS
2614 p54_unregister_leds(dev); 2614 p54_unregister_leds(dev);
2615 #endif /* CONFIG_MAC80211_LEDS */ 2615#endif /* CONFIG_P54_LEDS */
2616} 2616}
2617EXPORT_SYMBOL_GPL(p54_free_common); 2617EXPORT_SYMBOL_GPL(p54_free_common);
2618 2618
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 24fdfdfee3df..420fff42c0dd 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2425,6 +2425,8 @@ static struct usb_device_id rt73usb_device_table[] = {
2425 { USB_DEVICE(0x0df6, 0x9712), USB_DEVICE_DATA(&rt73usb_ops) }, 2425 { USB_DEVICE(0x0df6, 0x9712), USB_DEVICE_DATA(&rt73usb_ops) },
2426 /* Surecom */ 2426 /* Surecom */
2427 { USB_DEVICE(0x0769, 0x31f3), USB_DEVICE_DATA(&rt73usb_ops) }, 2427 { USB_DEVICE(0x0769, 0x31f3), USB_DEVICE_DATA(&rt73usb_ops) },
2428 /* Tilgin */
2429 { USB_DEVICE(0x6933, 0x5001), USB_DEVICE_DATA(&rt73usb_ops) },
2428 /* Philips */ 2430 /* Philips */
2429 { USB_DEVICE(0x0471, 0x200a), USB_DEVICE_DATA(&rt73usb_ops) }, 2431 { USB_DEVICE(0x0471, 0x200a), USB_DEVICE_DATA(&rt73usb_ops) },
2430 /* Planex */ 2432 /* Planex */
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index b728541f2fb5..3ab3eb957189 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -735,9 +735,9 @@ if (lp->tx_n_in_use > 0)
735 if (tx_status & AC_SFLD_OK) { 735 if (tx_status & AC_SFLD_OK) {
736 int ncollisions; 736 int ncollisions;
737 737
738 lp->stats.tx_packets++; 738 dev->stats.tx_packets++;
739 ncollisions = tx_status & AC_SFLD_MAXCOL; 739 ncollisions = tx_status & AC_SFLD_MAXCOL;
740 lp->stats.collisions += ncollisions; 740 dev->stats.collisions += ncollisions;
741#ifdef DEBUG_TX_INFO 741#ifdef DEBUG_TX_INFO
742 if (ncollisions > 0) 742 if (ncollisions > 0)
743 printk(KERN_DEBUG 743 printk(KERN_DEBUG
@@ -745,9 +745,9 @@ if (lp->tx_n_in_use > 0)
745 dev->name, ncollisions); 745 dev->name, ncollisions);
746#endif 746#endif
747 } else { 747 } else {
748 lp->stats.tx_errors++; 748 dev->stats.tx_errors++;
749 if (tx_status & AC_SFLD_S10) { 749 if (tx_status & AC_SFLD_S10) {
750 lp->stats.tx_carrier_errors++; 750 dev->stats.tx_carrier_errors++;
751#ifdef DEBUG_TX_FAIL 751#ifdef DEBUG_TX_FAIL
752 printk(KERN_DEBUG 752 printk(KERN_DEBUG
753 "%s: wv_complete(): tx error: no CS.\n", 753 "%s: wv_complete(): tx error: no CS.\n",
@@ -755,7 +755,7 @@ if (lp->tx_n_in_use > 0)
755#endif 755#endif
756 } 756 }
757 if (tx_status & AC_SFLD_S9) { 757 if (tx_status & AC_SFLD_S9) {
758 lp->stats.tx_carrier_errors++; 758 dev->stats.tx_carrier_errors++;
759#ifdef DEBUG_TX_FAIL 759#ifdef DEBUG_TX_FAIL
760 printk(KERN_DEBUG 760 printk(KERN_DEBUG
761 "%s: wv_complete(): tx error: lost CTS.\n", 761 "%s: wv_complete(): tx error: lost CTS.\n",
@@ -763,7 +763,7 @@ if (lp->tx_n_in_use > 0)
763#endif 763#endif
764 } 764 }
765 if (tx_status & AC_SFLD_S8) { 765 if (tx_status & AC_SFLD_S8) {
766 lp->stats.tx_fifo_errors++; 766 dev->stats.tx_fifo_errors++;
767#ifdef DEBUG_TX_FAIL 767#ifdef DEBUG_TX_FAIL
768 printk(KERN_DEBUG 768 printk(KERN_DEBUG
769 "%s: wv_complete(): tx error: slow DMA.\n", 769 "%s: wv_complete(): tx error: slow DMA.\n",
@@ -771,7 +771,7 @@ if (lp->tx_n_in_use > 0)
771#endif 771#endif
772 } 772 }
773 if (tx_status & AC_SFLD_S6) { 773 if (tx_status & AC_SFLD_S6) {
774 lp->stats.tx_heartbeat_errors++; 774 dev->stats.tx_heartbeat_errors++;
775#ifdef DEBUG_TX_FAIL 775#ifdef DEBUG_TX_FAIL
776 printk(KERN_DEBUG 776 printk(KERN_DEBUG
777 "%s: wv_complete(): tx error: heart beat.\n", 777 "%s: wv_complete(): tx error: heart beat.\n",
@@ -779,7 +779,7 @@ if (lp->tx_n_in_use > 0)
779#endif 779#endif
780 } 780 }
781 if (tx_status & AC_SFLD_S5) { 781 if (tx_status & AC_SFLD_S5) {
782 lp->stats.tx_aborted_errors++; 782 dev->stats.tx_aborted_errors++;
783#ifdef DEBUG_TX_FAIL 783#ifdef DEBUG_TX_FAIL
784 printk(KERN_DEBUG 784 printk(KERN_DEBUG
785 "%s: wv_complete(): tx error: too many collisions.\n", 785 "%s: wv_complete(): tx error: too many collisions.\n",
@@ -1346,20 +1346,6 @@ static void wv_init_info(struct net_device * dev)
1346 * or wireless extensions 1346 * or wireless extensions
1347 */ 1347 */
1348 1348
1349/*------------------------------------------------------------------*/
1350/*
1351 * Get the current Ethernet statistics. This may be called with the
1352 * card open or closed.
1353 * Used when the user read /proc/net/dev
1354 */
1355static en_stats *wavelan_get_stats(struct net_device * dev)
1356{
1357#ifdef DEBUG_IOCTL_TRACE
1358 printk(KERN_DEBUG "%s: <>wavelan_get_stats()\n", dev->name);
1359#endif
1360
1361 return &((net_local *)netdev_priv(dev))->stats;
1362}
1363 1349
1364/*------------------------------------------------------------------*/ 1350/*------------------------------------------------------------------*/
1365/* 1351/*
@@ -2466,7 +2452,7 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
2466 "%s: wv_packet_read(): could not alloc_skb(%d, GFP_ATOMIC).\n", 2452 "%s: wv_packet_read(): could not alloc_skb(%d, GFP_ATOMIC).\n",
2467 dev->name, sksize); 2453 dev->name, sksize);
2468#endif 2454#endif
2469 lp->stats.rx_dropped++; 2455 dev->stats.rx_dropped++;
2470 return; 2456 return;
2471 } 2457 }
2472 2458
@@ -2526,8 +2512,8 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
2526 netif_rx(skb); 2512 netif_rx(skb);
2527 2513
2528 /* Keep statistics up to date */ 2514 /* Keep statistics up to date */
2529 lp->stats.rx_packets++; 2515 dev->stats.rx_packets++;
2530 lp->stats.rx_bytes += sksize; 2516 dev->stats.rx_bytes += sksize;
2531 2517
2532#ifdef DEBUG_RX_TRACE 2518#ifdef DEBUG_RX_TRACE
2533 printk(KERN_DEBUG "%s: <-wv_packet_read()\n", dev->name); 2519 printk(KERN_DEBUG "%s: <-wv_packet_read()\n", dev->name);
@@ -2608,7 +2594,7 @@ static void wv_receive(struct net_device * dev)
2608#endif 2594#endif
2609 } else { /* If reception was no successful */ 2595 } else { /* If reception was no successful */
2610 2596
2611 lp->stats.rx_errors++; 2597 dev->stats.rx_errors++;
2612 2598
2613#ifdef DEBUG_RX_INFO 2599#ifdef DEBUG_RX_INFO
2614 printk(KERN_DEBUG 2600 printk(KERN_DEBUG
@@ -2624,7 +2610,7 @@ static void wv_receive(struct net_device * dev)
2624#endif 2610#endif
2625 2611
2626 if ((fd.fd_status & FD_STATUS_S7) != 0) { 2612 if ((fd.fd_status & FD_STATUS_S7) != 0) {
2627 lp->stats.rx_length_errors++; 2613 dev->stats.rx_length_errors++;
2628#ifdef DEBUG_RX_FAIL 2614#ifdef DEBUG_RX_FAIL
2629 printk(KERN_DEBUG 2615 printk(KERN_DEBUG
2630 "%s: wv_receive(): frame too short.\n", 2616 "%s: wv_receive(): frame too short.\n",
@@ -2633,7 +2619,7 @@ static void wv_receive(struct net_device * dev)
2633 } 2619 }
2634 2620
2635 if ((fd.fd_status & FD_STATUS_S8) != 0) { 2621 if ((fd.fd_status & FD_STATUS_S8) != 0) {
2636 lp->stats.rx_over_errors++; 2622 dev->stats.rx_over_errors++;
2637#ifdef DEBUG_RX_FAIL 2623#ifdef DEBUG_RX_FAIL
2638 printk(KERN_DEBUG 2624 printk(KERN_DEBUG
2639 "%s: wv_receive(): rx DMA overrun.\n", 2625 "%s: wv_receive(): rx DMA overrun.\n",
@@ -2642,7 +2628,7 @@ static void wv_receive(struct net_device * dev)
2642 } 2628 }
2643 2629
2644 if ((fd.fd_status & FD_STATUS_S9) != 0) { 2630 if ((fd.fd_status & FD_STATUS_S9) != 0) {
2645 lp->stats.rx_fifo_errors++; 2631 dev->stats.rx_fifo_errors++;
2646#ifdef DEBUG_RX_FAIL 2632#ifdef DEBUG_RX_FAIL
2647 printk(KERN_DEBUG 2633 printk(KERN_DEBUG
2648 "%s: wv_receive(): ran out of resources.\n", 2634 "%s: wv_receive(): ran out of resources.\n",
@@ -2651,7 +2637,7 @@ static void wv_receive(struct net_device * dev)
2651 } 2637 }
2652 2638
2653 if ((fd.fd_status & FD_STATUS_S10) != 0) { 2639 if ((fd.fd_status & FD_STATUS_S10) != 0) {
2654 lp->stats.rx_frame_errors++; 2640 dev->stats.rx_frame_errors++;
2655#ifdef DEBUG_RX_FAIL 2641#ifdef DEBUG_RX_FAIL
2656 printk(KERN_DEBUG 2642 printk(KERN_DEBUG
2657 "%s: wv_receive(): alignment error.\n", 2643 "%s: wv_receive(): alignment error.\n",
@@ -2660,7 +2646,7 @@ static void wv_receive(struct net_device * dev)
2660 } 2646 }
2661 2647
2662 if ((fd.fd_status & FD_STATUS_S11) != 0) { 2648 if ((fd.fd_status & FD_STATUS_S11) != 0) {
2663 lp->stats.rx_crc_errors++; 2649 dev->stats.rx_crc_errors++;
2664#ifdef DEBUG_RX_FAIL 2650#ifdef DEBUG_RX_FAIL
2665 printk(KERN_DEBUG 2651 printk(KERN_DEBUG
2666 "%s: wv_receive(): CRC error.\n", 2652 "%s: wv_receive(): CRC error.\n",
@@ -2826,7 +2812,7 @@ static int wv_packet_write(struct net_device * dev, void *buf, short length)
2826 dev->trans_start = jiffies; 2812 dev->trans_start = jiffies;
2827 2813
2828 /* Keep stats up to date. */ 2814 /* Keep stats up to date. */
2829 lp->stats.tx_bytes += length; 2815 dev->stats.tx_bytes += length;
2830 2816
2831 if (lp->tx_first_in_use == I82586NULL) 2817 if (lp->tx_first_in_use == I82586NULL)
2832 lp->tx_first_in_use = txblock; 2818 lp->tx_first_in_use = txblock;
@@ -4038,6 +4024,22 @@ static int wavelan_close(struct net_device * dev)
4038 return 0; 4024 return 0;
4039} 4025}
4040 4026
4027static const struct net_device_ops wavelan_netdev_ops = {
4028 .ndo_open = wavelan_open,
4029 .ndo_stop = wavelan_close,
4030 .ndo_start_xmit = wavelan_packet_xmit,
4031 .ndo_set_multicast_list = wavelan_set_multicast_list,
4032 .ndo_tx_timeout = wavelan_watchdog,
4033 .ndo_change_mtu = eth_change_mtu,
4034 .ndo_validate_addr = eth_validate_addr,
4035#ifdef SET_MAC_ADDRESS
4036 .ndo_set_mac_address = wavelan_set_mac_address
4037#else
4038 .ndo_set_mac_address = eth_mac_addr,
4039#endif
4040};
4041
4042
4041/*------------------------------------------------------------------*/ 4043/*------------------------------------------------------------------*/
4042/* 4044/*
4043 * Probe an I/O address, and if the WaveLAN is there configure the 4045 * Probe an I/O address, and if the WaveLAN is there configure the
@@ -4130,17 +4132,8 @@ static int __init wavelan_config(struct net_device *dev, unsigned short ioaddr)
4130 /* Init spinlock */ 4132 /* Init spinlock */
4131 spin_lock_init(&lp->spinlock); 4133 spin_lock_init(&lp->spinlock);
4132 4134
4133 dev->open = wavelan_open; 4135 dev->netdev_ops = &wavelan_netdev_ops;
4134 dev->stop = wavelan_close; 4136 dev->watchdog_timeo = WATCHDOG_JIFFIES;
4135 dev->hard_start_xmit = wavelan_packet_xmit;
4136 dev->get_stats = wavelan_get_stats;
4137 dev->set_multicast_list = &wavelan_set_multicast_list;
4138 dev->tx_timeout = &wavelan_watchdog;
4139 dev->watchdog_timeo = WATCHDOG_JIFFIES;
4140#ifdef SET_MAC_ADDRESS
4141 dev->set_mac_address = &wavelan_set_mac_address;
4142#endif /* SET_MAC_ADDRESS */
4143
4144 dev->wireless_handlers = &wavelan_handler_def; 4137 dev->wireless_handlers = &wavelan_handler_def;
4145 lp->wireless_data.spy_data = &lp->spy_data; 4138 lp->wireless_data.spy_data = &lp->spy_data;
4146 dev->wireless_data = &lp->wireless_data; 4139 dev->wireless_data = &lp->wireless_data;
diff --git a/drivers/net/wireless/wavelan.p.h b/drivers/net/wireless/wavelan.p.h
index 44d31bbf39e4..2daa0210d789 100644
--- a/drivers/net/wireless/wavelan.p.h
+++ b/drivers/net/wireless/wavelan.p.h
@@ -459,11 +459,9 @@ static const char *version = "wavelan.c : v24 (SMP + wireless extensions) 11/12/
459/****************************** TYPES ******************************/ 459/****************************** TYPES ******************************/
460 460
461/* Shortcuts */ 461/* Shortcuts */
462typedef struct net_device_stats en_stats;
463typedef struct iw_statistics iw_stats; 462typedef struct iw_statistics iw_stats;
464typedef struct iw_quality iw_qual; 463typedef struct iw_quality iw_qual;
465typedef struct iw_freq iw_freq; 464typedef struct iw_freq iw_freq;typedef struct net_local net_local;
466typedef struct net_local net_local;
467typedef struct timer_list timer_list; 465typedef struct timer_list timer_list;
468 466
469/* Basic types */ 467/* Basic types */
@@ -475,15 +473,12 @@ typedef u_char mac_addr[WAVELAN_ADDR_SIZE]; /* Hardware address */
475 * For each network interface, Linux keeps data in two structures: "device" 473 * For each network interface, Linux keeps data in two structures: "device"
476 * keeps the generic data (same format for everybody) and "net_local" keeps 474 * keeps the generic data (same format for everybody) and "net_local" keeps
477 * additional specific data. 475 * additional specific data.
478 * Note that some of this specific data is in fact generic (en_stats, for
479 * example).
480 */ 476 */
481struct net_local 477struct net_local
482{ 478{
483 net_local * next; /* linked list of the devices */ 479 net_local * next; /* linked list of the devices */
484 struct net_device * dev; /* reverse link */ 480 struct net_device * dev; /* reverse link */
485 spinlock_t spinlock; /* Serialize access to the hardware (SMP) */ 481 spinlock_t spinlock; /* Serialize access to the hardware (SMP) */
486 en_stats stats; /* Ethernet interface statistics */
487 int nresets; /* number of hardware resets */ 482 int nresets; /* number of hardware resets */
488 u_char reconfig_82586; /* We need to reconfigure the controller. */ 483 u_char reconfig_82586; /* We need to reconfigure the controller. */
489 u_char promiscuous; /* promiscuous mode */ 484 u_char promiscuous; /* promiscuous mode */
@@ -601,8 +596,6 @@ static void
601static inline void 596static inline void
602 wv_init_info(struct net_device *); /* display startup info */ 597 wv_init_info(struct net_device *); /* display startup info */
603/* ------------------- IOCTL, STATS & RECONFIG ------------------- */ 598/* ------------------- IOCTL, STATS & RECONFIG ------------------- */
604static en_stats *
605 wavelan_get_stats(struct net_device *); /* Give stats /proc/net/dev */
606static iw_stats * 599static iw_stats *
607 wavelan_get_wireless_stats(struct net_device *); 600 wavelan_get_wireless_stats(struct net_device *);
608static void 601static void
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 9f102a6535c4..f67325387902 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1511,7 +1511,7 @@ static int xennet_set_tso(struct net_device *dev, u32 data)
1511static void xennet_set_features(struct net_device *dev) 1511static void xennet_set_features(struct net_device *dev)
1512{ 1512{
1513 /* Turn off all GSO bits except ROBUST. */ 1513 /* Turn off all GSO bits except ROBUST. */
1514 dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1; 1514 dev->features &= ~NETIF_F_GSO_MASK;
1515 dev->features |= NETIF_F_GSO_ROBUST; 1515 dev->features |= NETIF_F_GSO_ROBUST;
1516 xennet_set_sg(dev, 0); 1516 xennet_set_sg(dev, 0);
1517 1517
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 8af7dfbe022c..616c60ffcf2c 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Module interface and handling of zfcp data structures. 4 * Module interface and handling of zfcp data structures.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2008 6 * Copyright IBM Corporation 2002, 2009
7 */ 7 */
8 8
9/* 9/*
@@ -249,8 +249,8 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
249 struct zfcp_port *port; 249 struct zfcp_port *port;
250 250
251 list_for_each_entry(port, &adapter->port_list_head, list) 251 list_for_each_entry(port, &adapter->port_list_head, list)
252 if ((port->wwpn == wwpn) && !(atomic_read(&port->status) & 252 if ((port->wwpn == wwpn) &&
253 (ZFCP_STATUS_PORT_NO_WWPN | ZFCP_STATUS_COMMON_REMOVE))) 253 !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE))
254 return port; 254 return port;
255 return NULL; 255 return NULL;
256} 256}
@@ -421,7 +421,8 @@ int zfcp_status_read_refill(struct zfcp_adapter *adapter)
421 while (atomic_read(&adapter->stat_miss) > 0) 421 while (atomic_read(&adapter->stat_miss) > 0)
422 if (zfcp_fsf_status_read(adapter)) { 422 if (zfcp_fsf_status_read(adapter)) {
423 if (atomic_read(&adapter->stat_miss) >= 16) { 423 if (atomic_read(&adapter->stat_miss) >= 16) {
424 zfcp_erp_adapter_reopen(adapter, 0, 103, NULL); 424 zfcp_erp_adapter_reopen(adapter, 0, "axsref1",
425 NULL);
425 return 1; 426 return 1;
426 } 427 }
427 break; 428 break;
@@ -501,6 +502,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
501 spin_lock_init(&adapter->scsi_dbf_lock); 502 spin_lock_init(&adapter->scsi_dbf_lock);
502 spin_lock_init(&adapter->rec_dbf_lock); 503 spin_lock_init(&adapter->rec_dbf_lock);
503 spin_lock_init(&adapter->req_q_lock); 504 spin_lock_init(&adapter->req_q_lock);
505 spin_lock_init(&adapter->qdio_stat_lock);
504 506
505 rwlock_init(&adapter->erp_lock); 507 rwlock_init(&adapter->erp_lock);
506 rwlock_init(&adapter->abort_lock); 508 rwlock_init(&adapter->abort_lock);
@@ -522,7 +524,6 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
522 goto sysfs_failed; 524 goto sysfs_failed;
523 525
524 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); 526 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
525 zfcp_fc_nameserver_init(adapter);
526 527
527 if (!zfcp_adapter_scsi_register(adapter)) 528 if (!zfcp_adapter_scsi_register(adapter))
528 return 0; 529 return 0;
@@ -552,6 +553,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
552 553
553 cancel_work_sync(&adapter->scan_work); 554 cancel_work_sync(&adapter->scan_work);
554 cancel_work_sync(&adapter->stat_work); 555 cancel_work_sync(&adapter->stat_work);
556 cancel_delayed_work_sync(&adapter->nsp.work);
555 zfcp_adapter_scsi_unregister(adapter); 557 zfcp_adapter_scsi_unregister(adapter);
556 sysfs_remove_group(&adapter->ccw_device->dev.kobj, 558 sysfs_remove_group(&adapter->ccw_device->dev.kobj,
557 &zfcp_sysfs_adapter_attrs); 559 &zfcp_sysfs_adapter_attrs);
@@ -603,10 +605,13 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
603 init_waitqueue_head(&port->remove_wq); 605 init_waitqueue_head(&port->remove_wq);
604 INIT_LIST_HEAD(&port->unit_list_head); 606 INIT_LIST_HEAD(&port->unit_list_head);
605 INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup); 607 INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup);
608 INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
609 INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);
606 610
607 port->adapter = adapter; 611 port->adapter = adapter;
608 port->d_id = d_id; 612 port->d_id = d_id;
609 port->wwpn = wwpn; 613 port->wwpn = wwpn;
614 port->rport_task = RPORT_NONE;
610 615
611 /* mark port unusable as long as sysfs registration is not complete */ 616 /* mark port unusable as long as sysfs registration is not complete */
612 atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status); 617 atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
@@ -620,11 +625,10 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
620 dev_set_drvdata(&port->sysfs_device, port); 625 dev_set_drvdata(&port->sysfs_device, port);
621 626
622 read_lock_irq(&zfcp_data.config_lock); 627 read_lock_irq(&zfcp_data.config_lock);
623 if (!(status & ZFCP_STATUS_PORT_NO_WWPN)) 628 if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
624 if (zfcp_get_port_by_wwpn(adapter, wwpn)) { 629 read_unlock_irq(&zfcp_data.config_lock);
625 read_unlock_irq(&zfcp_data.config_lock); 630 goto err_out_free;
626 goto err_out_free; 631 }
627 }
628 read_unlock_irq(&zfcp_data.config_lock); 632 read_unlock_irq(&zfcp_data.config_lock);
629 633
630 if (device_register(&port->sysfs_device)) 634 if (device_register(&port->sysfs_device))
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 285881f07648..1fe1e2eda512 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Registration and callback for the s390 common I/O layer. 4 * Registration and callback for the s390 common I/O layer.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2008 6 * Copyright IBM Corporation 2002, 2009
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -72,8 +72,7 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
72 72
73 list_for_each_entry_safe(port, p, &port_remove_lh, list) { 73 list_for_each_entry_safe(port, p, &port_remove_lh, list) {
74 list_for_each_entry_safe(unit, u, &unit_remove_lh, list) { 74 list_for_each_entry_safe(unit, u, &unit_remove_lh, list) {
75 if (atomic_read(&unit->status) & 75 if (unit->device)
76 ZFCP_STATUS_UNIT_REGISTERED)
77 scsi_remove_device(unit->device); 76 scsi_remove_device(unit->device);
78 zfcp_unit_dequeue(unit); 77 zfcp_unit_dequeue(unit);
79 } 78 }
@@ -109,11 +108,12 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
109 /* initialize request counter */ 108 /* initialize request counter */
110 BUG_ON(!zfcp_reqlist_isempty(adapter)); 109 BUG_ON(!zfcp_reqlist_isempty(adapter));
111 adapter->req_no = 0; 110 adapter->req_no = 0;
111 zfcp_fc_nameserver_init(adapter);
112 112
113 zfcp_erp_modify_adapter_status(adapter, 10, NULL, 113 zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL,
114 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); 114 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
115 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 85, 115 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
116 NULL); 116 "ccsonl2", NULL);
117 zfcp_erp_wait(adapter); 117 zfcp_erp_wait(adapter);
118 up(&zfcp_data.config_sema); 118 up(&zfcp_data.config_sema);
119 flush_work(&adapter->scan_work); 119 flush_work(&adapter->scan_work);
@@ -137,7 +137,7 @@ static int zfcp_ccw_set_offline(struct ccw_device *ccw_device)
137 137
138 down(&zfcp_data.config_sema); 138 down(&zfcp_data.config_sema);
139 adapter = dev_get_drvdata(&ccw_device->dev); 139 adapter = dev_get_drvdata(&ccw_device->dev);
140 zfcp_erp_adapter_shutdown(adapter, 0, 86, NULL); 140 zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL);
141 zfcp_erp_wait(adapter); 141 zfcp_erp_wait(adapter);
142 zfcp_erp_thread_kill(adapter); 142 zfcp_erp_thread_kill(adapter);
143 up(&zfcp_data.config_sema); 143 up(&zfcp_data.config_sema);
@@ -160,21 +160,21 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
160 case CIO_GONE: 160 case CIO_GONE:
161 dev_warn(&adapter->ccw_device->dev, 161 dev_warn(&adapter->ccw_device->dev,
162 "The FCP device has been detached\n"); 162 "The FCP device has been detached\n");
163 zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL); 163 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL);
164 break; 164 break;
165 case CIO_NO_PATH: 165 case CIO_NO_PATH:
166 dev_warn(&adapter->ccw_device->dev, 166 dev_warn(&adapter->ccw_device->dev,
167 "The CHPID for the FCP device is offline\n"); 167 "The CHPID for the FCP device is offline\n");
168 zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL); 168 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL);
169 break; 169 break;
170 case CIO_OPER: 170 case CIO_OPER:
171 dev_info(&adapter->ccw_device->dev, 171 dev_info(&adapter->ccw_device->dev,
172 "The FCP device is operational again\n"); 172 "The FCP device is operational again\n");
173 zfcp_erp_modify_adapter_status(adapter, 11, NULL, 173 zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL,
174 ZFCP_STATUS_COMMON_RUNNING, 174 ZFCP_STATUS_COMMON_RUNNING,
175 ZFCP_SET); 175 ZFCP_SET);
176 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 176 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
177 89, NULL); 177 "ccnoti4", NULL);
178 break; 178 break;
179 } 179 }
180 return 1; 180 return 1;
@@ -190,7 +190,7 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
190 190
191 down(&zfcp_data.config_sema); 191 down(&zfcp_data.config_sema);
192 adapter = dev_get_drvdata(&cdev->dev); 192 adapter = dev_get_drvdata(&cdev->dev);
193 zfcp_erp_adapter_shutdown(adapter, 0, 90, NULL); 193 zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL);
194 zfcp_erp_wait(adapter); 194 zfcp_erp_wait(adapter);
195 up(&zfcp_data.config_sema); 195 up(&zfcp_data.config_sema);
196} 196}
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index cb6df609953e..0a1a5dd8d018 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -490,172 +490,17 @@ static const char *zfcp_rec_dbf_tags[] = {
490 [ZFCP_REC_DBF_ID_ACTION] = "action", 490 [ZFCP_REC_DBF_ID_ACTION] = "action",
491}; 491};
492 492
493static const char *zfcp_rec_dbf_ids[] = {
494 [1] = "new",
495 [2] = "ready",
496 [3] = "kill",
497 [4] = "down sleep",
498 [5] = "down wakeup",
499 [6] = "down sleep ecd",
500 [7] = "down wakeup ecd",
501 [8] = "down sleep epd",
502 [9] = "down wakeup epd",
503 [10] = "online",
504 [11] = "operational",
505 [12] = "scsi slave destroy",
506 [13] = "propagate failed adapter",
507 [14] = "propagate failed port",
508 [15] = "block adapter",
509 [16] = "unblock adapter",
510 [17] = "block port",
511 [18] = "unblock port",
512 [19] = "block unit",
513 [20] = "unblock unit",
514 [21] = "unit recovery failed",
515 [22] = "port recovery failed",
516 [23] = "adapter recovery failed",
517 [24] = "qdio queues down",
518 [25] = "p2p failed",
519 [26] = "nameserver lookup failed",
520 [27] = "nameserver port failed",
521 [28] = "link up",
522 [29] = "link down",
523 [30] = "link up status read",
524 [31] = "open port failed",
525 [32] = "",
526 [33] = "close port",
527 [34] = "open unit failed",
528 [35] = "exclusive open unit failed",
529 [36] = "shared open unit failed",
530 [37] = "link down",
531 [38] = "link down status read no link",
532 [39] = "link down status read fdisc login",
533 [40] = "link down status read firmware update",
534 [41] = "link down status read unknown reason",
535 [42] = "link down ecd incomplete",
536 [43] = "link down epd incomplete",
537 [44] = "sysfs adapter recovery",
538 [45] = "sysfs port recovery",
539 [46] = "sysfs unit recovery",
540 [47] = "port boxed abort",
541 [48] = "unit boxed abort",
542 [49] = "port boxed ct",
543 [50] = "port boxed close physical",
544 [51] = "port boxed open unit",
545 [52] = "port boxed close unit",
546 [53] = "port boxed fcp",
547 [54] = "unit boxed fcp",
548 [55] = "port access denied",
549 [56] = "",
550 [57] = "",
551 [58] = "",
552 [59] = "unit access denied",
553 [60] = "shared unit access denied open unit",
554 [61] = "",
555 [62] = "request timeout",
556 [63] = "adisc link test reject or timeout",
557 [64] = "adisc link test d_id changed",
558 [65] = "adisc link test failed",
559 [66] = "recovery out of memory",
560 [67] = "adapter recovery repeated after state change",
561 [68] = "port recovery repeated after state change",
562 [69] = "unit recovery repeated after state change",
563 [70] = "port recovery follow-up after successful adapter recovery",
564 [71] = "adapter recovery escalation after failed adapter recovery",
565 [72] = "port recovery follow-up after successful physical port "
566 "recovery",
567 [73] = "adapter recovery escalation after failed physical port "
568 "recovery",
569 [74] = "unit recovery follow-up after successful port recovery",
570 [75] = "physical port recovery escalation after failed port "
571 "recovery",
572 [76] = "port recovery escalation after failed unit recovery",
573 [77] = "",
574 [78] = "duplicate request id",
575 [79] = "link down",
576 [80] = "exclusive read-only unit access unsupported",
577 [81] = "shared read-write unit access unsupported",
578 [82] = "incoming rscn",
579 [83] = "incoming wwpn",
580 [84] = "wka port handle not valid close port",
581 [85] = "online",
582 [86] = "offline",
583 [87] = "ccw device gone",
584 [88] = "ccw device no path",
585 [89] = "ccw device operational",
586 [90] = "ccw device shutdown",
587 [91] = "sysfs port addition",
588 [92] = "sysfs port removal",
589 [93] = "sysfs adapter recovery",
590 [94] = "sysfs unit addition",
591 [95] = "sysfs unit removal",
592 [96] = "sysfs port recovery",
593 [97] = "sysfs unit recovery",
594 [98] = "sequence number mismatch",
595 [99] = "link up",
596 [100] = "error state",
597 [101] = "status read physical port closed",
598 [102] = "link up status read",
599 [103] = "too many failed status read buffers",
600 [104] = "port handle not valid abort",
601 [105] = "lun handle not valid abort",
602 [106] = "port handle not valid ct",
603 [107] = "port handle not valid close port",
604 [108] = "port handle not valid close physical port",
605 [109] = "port handle not valid open unit",
606 [110] = "port handle not valid close unit",
607 [111] = "lun handle not valid close unit",
608 [112] = "port handle not valid fcp",
609 [113] = "lun handle not valid fcp",
610 [114] = "handle mismatch fcp",
611 [115] = "lun not valid fcp",
612 [116] = "qdio send failed",
613 [117] = "version mismatch",
614 [118] = "incompatible qtcb type",
615 [119] = "unknown protocol status",
616 [120] = "unknown fsf command",
617 [121] = "no recommendation for status qualifier",
618 [122] = "status read physical port closed in error",
619 [123] = "fc service class not supported",
620 [124] = "",
621 [125] = "need newer zfcp",
622 [126] = "need newer microcode",
623 [127] = "arbitrated loop not supported",
624 [128] = "",
625 [129] = "qtcb size mismatch",
626 [130] = "unknown fsf status ecd",
627 [131] = "fcp request too big",
628 [132] = "",
629 [133] = "data direction not valid fcp",
630 [134] = "command length not valid fcp",
631 [135] = "status read act update",
632 [136] = "status read cfdc update",
633 [137] = "hbaapi port open",
634 [138] = "hbaapi unit open",
635 [139] = "hbaapi unit shutdown",
636 [140] = "qdio error outbound",
637 [141] = "scsi host reset",
638 [142] = "dismissing fsf request for recovery action",
639 [143] = "recovery action timed out",
640 [144] = "recovery action gone",
641 [145] = "recovery action being processed",
642 [146] = "recovery action ready for next step",
643 [147] = "qdio error inbound",
644 [148] = "nameserver needed for port scan",
645 [149] = "port scan",
646 [150] = "ptp attach",
647 [151] = "port validation failed",
648};
649
650static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view, 493static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view,
651 char *buf, const char *_rec) 494 char *buf, const char *_rec)
652{ 495{
653 struct zfcp_rec_dbf_record *r = (struct zfcp_rec_dbf_record *)_rec; 496 struct zfcp_rec_dbf_record *r = (struct zfcp_rec_dbf_record *)_rec;
654 char *p = buf; 497 char *p = buf;
498 char hint[ZFCP_DBF_ID_SIZE + 1];
655 499
500 memcpy(hint, r->id2, ZFCP_DBF_ID_SIZE);
501 hint[ZFCP_DBF_ID_SIZE] = 0;
656 zfcp_dbf_outs(&p, "tag", zfcp_rec_dbf_tags[r->id]); 502 zfcp_dbf_outs(&p, "tag", zfcp_rec_dbf_tags[r->id]);
657 zfcp_dbf_outs(&p, "hint", zfcp_rec_dbf_ids[r->id2]); 503 zfcp_dbf_outs(&p, "hint", hint);
658 zfcp_dbf_out(&p, "id", "%d", r->id2);
659 switch (r->id) { 504 switch (r->id) {
660 case ZFCP_REC_DBF_ID_THREAD: 505 case ZFCP_REC_DBF_ID_THREAD:
661 zfcp_dbf_out(&p, "total", "%d", r->u.thread.total); 506 zfcp_dbf_out(&p, "total", "%d", r->u.thread.total);
@@ -707,7 +552,7 @@ static struct debug_view zfcp_rec_dbf_view = {
707 * @adapter: adapter 552 * @adapter: adapter
708 * This function assumes that the caller is holding erp_lock. 553 * This function assumes that the caller is holding erp_lock.
709 */ 554 */
710void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter) 555void zfcp_rec_dbf_event_thread(char *id2, struct zfcp_adapter *adapter)
711{ 556{
712 struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; 557 struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
713 unsigned long flags = 0; 558 unsigned long flags = 0;
@@ -723,7 +568,7 @@ void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter)
723 spin_lock_irqsave(&adapter->rec_dbf_lock, flags); 568 spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
724 memset(r, 0, sizeof(*r)); 569 memset(r, 0, sizeof(*r));
725 r->id = ZFCP_REC_DBF_ID_THREAD; 570 r->id = ZFCP_REC_DBF_ID_THREAD;
726 r->id2 = id2; 571 memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
727 r->u.thread.total = total; 572 r->u.thread.total = total;
728 r->u.thread.ready = ready; 573 r->u.thread.ready = ready;
729 r->u.thread.running = running; 574 r->u.thread.running = running;
@@ -737,7 +582,7 @@ void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter)
737 * @adapter: adapter 582 * @adapter: adapter
738 * This function assumes that the caller does not hold erp_lock. 583 * This function assumes that the caller does not hold erp_lock.
739 */ 584 */
740void zfcp_rec_dbf_event_thread_lock(u8 id2, struct zfcp_adapter *adapter) 585void zfcp_rec_dbf_event_thread_lock(char *id2, struct zfcp_adapter *adapter)
741{ 586{
742 unsigned long flags; 587 unsigned long flags;
743 588
@@ -746,7 +591,7 @@ void zfcp_rec_dbf_event_thread_lock(u8 id2, struct zfcp_adapter *adapter)
746 read_unlock_irqrestore(&adapter->erp_lock, flags); 591 read_unlock_irqrestore(&adapter->erp_lock, flags);
747} 592}
748 593
749static void zfcp_rec_dbf_event_target(u8 id2, void *ref, 594static void zfcp_rec_dbf_event_target(char *id2, void *ref,
750 struct zfcp_adapter *adapter, 595 struct zfcp_adapter *adapter,
751 atomic_t *status, atomic_t *erp_count, 596 atomic_t *status, atomic_t *erp_count,
752 u64 wwpn, u32 d_id, u64 fcp_lun) 597 u64 wwpn, u32 d_id, u64 fcp_lun)
@@ -757,7 +602,7 @@ static void zfcp_rec_dbf_event_target(u8 id2, void *ref,
757 spin_lock_irqsave(&adapter->rec_dbf_lock, flags); 602 spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
758 memset(r, 0, sizeof(*r)); 603 memset(r, 0, sizeof(*r));
759 r->id = ZFCP_REC_DBF_ID_TARGET; 604 r->id = ZFCP_REC_DBF_ID_TARGET;
760 r->id2 = id2; 605 memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
761 r->u.target.ref = (unsigned long)ref; 606 r->u.target.ref = (unsigned long)ref;
762 r->u.target.status = atomic_read(status); 607 r->u.target.status = atomic_read(status);
763 r->u.target.wwpn = wwpn; 608 r->u.target.wwpn = wwpn;
@@ -774,7 +619,8 @@ static void zfcp_rec_dbf_event_target(u8 id2, void *ref,
774 * @ref: additional reference (e.g. request) 619 * @ref: additional reference (e.g. request)
775 * @adapter: adapter 620 * @adapter: adapter
776 */ 621 */
777void zfcp_rec_dbf_event_adapter(u8 id, void *ref, struct zfcp_adapter *adapter) 622void zfcp_rec_dbf_event_adapter(char *id, void *ref,
623 struct zfcp_adapter *adapter)
778{ 624{
779 zfcp_rec_dbf_event_target(id, ref, adapter, &adapter->status, 625 zfcp_rec_dbf_event_target(id, ref, adapter, &adapter->status,
780 &adapter->erp_counter, 0, 0, 0); 626 &adapter->erp_counter, 0, 0, 0);
@@ -786,7 +632,7 @@ void zfcp_rec_dbf_event_adapter(u8 id, void *ref, struct zfcp_adapter *adapter)
786 * @ref: additional reference (e.g. request) 632 * @ref: additional reference (e.g. request)
787 * @port: port 633 * @port: port
788 */ 634 */
789void zfcp_rec_dbf_event_port(u8 id, void *ref, struct zfcp_port *port) 635void zfcp_rec_dbf_event_port(char *id, void *ref, struct zfcp_port *port)
790{ 636{
791 struct zfcp_adapter *adapter = port->adapter; 637 struct zfcp_adapter *adapter = port->adapter;
792 638
@@ -801,7 +647,7 @@ void zfcp_rec_dbf_event_port(u8 id, void *ref, struct zfcp_port *port)
801 * @ref: additional reference (e.g. request) 647 * @ref: additional reference (e.g. request)
802 * @unit: unit 648 * @unit: unit
803 */ 649 */
804void zfcp_rec_dbf_event_unit(u8 id, void *ref, struct zfcp_unit *unit) 650void zfcp_rec_dbf_event_unit(char *id, void *ref, struct zfcp_unit *unit)
805{ 651{
806 struct zfcp_port *port = unit->port; 652 struct zfcp_port *port = unit->port;
807 struct zfcp_adapter *adapter = port->adapter; 653 struct zfcp_adapter *adapter = port->adapter;
@@ -822,7 +668,7 @@ void zfcp_rec_dbf_event_unit(u8 id, void *ref, struct zfcp_unit *unit)
822 * @port: port 668 * @port: port
823 * @unit: unit 669 * @unit: unit
824 */ 670 */
825void zfcp_rec_dbf_event_trigger(u8 id2, void *ref, u8 want, u8 need, 671void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need,
826 void *action, struct zfcp_adapter *adapter, 672 void *action, struct zfcp_adapter *adapter,
827 struct zfcp_port *port, struct zfcp_unit *unit) 673 struct zfcp_port *port, struct zfcp_unit *unit)
828{ 674{
@@ -832,7 +678,7 @@ void zfcp_rec_dbf_event_trigger(u8 id2, void *ref, u8 want, u8 need,
832 spin_lock_irqsave(&adapter->rec_dbf_lock, flags); 678 spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
833 memset(r, 0, sizeof(*r)); 679 memset(r, 0, sizeof(*r));
834 r->id = ZFCP_REC_DBF_ID_TRIGGER; 680 r->id = ZFCP_REC_DBF_ID_TRIGGER;
835 r->id2 = id2; 681 memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
836 r->u.trigger.ref = (unsigned long)ref; 682 r->u.trigger.ref = (unsigned long)ref;
837 r->u.trigger.want = want; 683 r->u.trigger.want = want;
838 r->u.trigger.need = need; 684 r->u.trigger.need = need;
@@ -855,7 +701,7 @@ void zfcp_rec_dbf_event_trigger(u8 id2, void *ref, u8 want, u8 need,
855 * @id2: identifier 701 * @id2: identifier
856 * @erp_action: error recovery action struct pointer 702 * @erp_action: error recovery action struct pointer
857 */ 703 */
858void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action) 704void zfcp_rec_dbf_event_action(char *id2, struct zfcp_erp_action *erp_action)
859{ 705{
860 struct zfcp_adapter *adapter = erp_action->adapter; 706 struct zfcp_adapter *adapter = erp_action->adapter;
861 struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; 707 struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
@@ -864,7 +710,7 @@ void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action)
864 spin_lock_irqsave(&adapter->rec_dbf_lock, flags); 710 spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
865 memset(r, 0, sizeof(*r)); 711 memset(r, 0, sizeof(*r));
866 r->id = ZFCP_REC_DBF_ID_ACTION; 712 r->id = ZFCP_REC_DBF_ID_ACTION;
867 r->id2 = id2; 713 memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
868 r->u.action.action = (unsigned long)erp_action; 714 r->u.action.action = (unsigned long)erp_action;
869 r->u.action.status = erp_action->status; 715 r->u.action.status = erp_action->status;
870 r->u.action.step = erp_action->step; 716 r->u.action.step = erp_action->step;
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 74998ff88e57..a573f7344dd6 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -25,6 +25,7 @@
25#include "zfcp_fsf.h" 25#include "zfcp_fsf.h"
26 26
27#define ZFCP_DBF_TAG_SIZE 4 27#define ZFCP_DBF_TAG_SIZE 4
28#define ZFCP_DBF_ID_SIZE 7
28 29
29struct zfcp_dbf_dump { 30struct zfcp_dbf_dump {
30 u8 tag[ZFCP_DBF_TAG_SIZE]; 31 u8 tag[ZFCP_DBF_TAG_SIZE];
@@ -70,7 +71,7 @@ struct zfcp_rec_dbf_record_action {
70 71
71struct zfcp_rec_dbf_record { 72struct zfcp_rec_dbf_record {
72 u8 id; 73 u8 id;
73 u8 id2; 74 char id2[7];
74 union { 75 union {
75 struct zfcp_rec_dbf_record_action action; 76 struct zfcp_rec_dbf_record_action action;
76 struct zfcp_rec_dbf_record_thread thread; 77 struct zfcp_rec_dbf_record_thread thread;
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 510662783a6f..a0318630f047 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Global definitions for the zfcp device driver. 4 * Global definitions for the zfcp device driver.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2008 6 * Copyright IBM Corporation 2002, 2009
7 */ 7 */
8 8
9#ifndef ZFCP_DEF_H 9#ifndef ZFCP_DEF_H
@@ -243,9 +243,6 @@ struct zfcp_ls_adisc {
243 243
244/* remote port status */ 244/* remote port status */
245#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 245#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
246#define ZFCP_STATUS_PORT_PHYS_CLOSING 0x00000004
247#define ZFCP_STATUS_PORT_NO_WWPN 0x00000008
248#define ZFCP_STATUS_PORT_INVALID_WWPN 0x00000020
249 246
250/* well known address (WKA) port status*/ 247/* well known address (WKA) port status*/
251enum zfcp_wka_status { 248enum zfcp_wka_status {
@@ -258,7 +255,6 @@ enum zfcp_wka_status {
258/* logical unit status */ 255/* logical unit status */
259#define ZFCP_STATUS_UNIT_SHARED 0x00000004 256#define ZFCP_STATUS_UNIT_SHARED 0x00000004
260#define ZFCP_STATUS_UNIT_READONLY 0x00000008 257#define ZFCP_STATUS_UNIT_READONLY 0x00000008
261#define ZFCP_STATUS_UNIT_REGISTERED 0x00000010
262#define ZFCP_STATUS_UNIT_SCSI_WORK_PENDING 0x00000020 258#define ZFCP_STATUS_UNIT_SCSI_WORK_PENDING 0x00000020
263 259
264/* FSF request status (this does not have a common part) */ 260/* FSF request status (this does not have a common part) */
@@ -447,8 +443,9 @@ struct zfcp_adapter {
447 spinlock_t req_list_lock; /* request list lock */ 443 spinlock_t req_list_lock; /* request list lock */
448 struct zfcp_qdio_queue req_q; /* request queue */ 444 struct zfcp_qdio_queue req_q; /* request queue */
449 spinlock_t req_q_lock; /* for operations on queue */ 445 spinlock_t req_q_lock; /* for operations on queue */
450 int req_q_pci_batch; /* SBALs since PCI indication 446 ktime_t req_q_time; /* time of last fill level change */
451 was last set */ 447 u64 req_q_util; /* for accounting */
448 spinlock_t qdio_stat_lock;
452 u32 fsf_req_seq_no; /* FSF cmnd seq number */ 449 u32 fsf_req_seq_no; /* FSF cmnd seq number */
453 wait_queue_head_t request_wq; /* can be used to wait for 450 wait_queue_head_t request_wq; /* can be used to wait for
454 more avaliable SBALs */ 451 more avaliable SBALs */
@@ -514,6 +511,9 @@ struct zfcp_port {
514 u32 maxframe_size; 511 u32 maxframe_size;
515 u32 supported_classes; 512 u32 supported_classes;
516 struct work_struct gid_pn_work; 513 struct work_struct gid_pn_work;
514 struct work_struct test_link_work;
515 struct work_struct rport_work;
516 enum { RPORT_NONE, RPORT_ADD, RPORT_DEL } rport_task;
517}; 517};
518 518
519struct zfcp_unit { 519struct zfcp_unit {
@@ -587,9 +587,6 @@ struct zfcp_fsf_req_qtcb {
587 587
588/********************** ZFCP SPECIFIC DEFINES ********************************/ 588/********************** ZFCP SPECIFIC DEFINES ********************************/
589 589
590#define ZFCP_REQ_AUTO_CLEANUP 0x00000002
591#define ZFCP_REQ_NO_QTCB 0x00000008
592
593#define ZFCP_SET 0x00000100 590#define ZFCP_SET 0x00000100
594#define ZFCP_CLEAR 0x00000200 591#define ZFCP_CLEAR 0x00000200
595 592
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 387a3af528ac..631bdb1dfd6c 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Error Recovery Procedures (ERP). 4 * Error Recovery Procedures (ERP).
5 * 5 *
6 * Copyright IBM Corporation 2002, 2008 6 * Copyright IBM Corporation 2002, 2009
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -55,7 +55,7 @@ enum zfcp_erp_act_result {
55 55
56static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask) 56static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
57{ 57{
58 zfcp_erp_modify_adapter_status(adapter, 15, NULL, 58 zfcp_erp_modify_adapter_status(adapter, "erablk1", NULL,
59 ZFCP_STATUS_COMMON_UNBLOCKED | mask, 59 ZFCP_STATUS_COMMON_UNBLOCKED | mask,
60 ZFCP_CLEAR); 60 ZFCP_CLEAR);
61} 61}
@@ -75,9 +75,9 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
75 struct zfcp_adapter *adapter = act->adapter; 75 struct zfcp_adapter *adapter = act->adapter;
76 76
77 list_move(&act->list, &act->adapter->erp_ready_head); 77 list_move(&act->list, &act->adapter->erp_ready_head);
78 zfcp_rec_dbf_event_action(146, act); 78 zfcp_rec_dbf_event_action("erardy1", act);
79 up(&adapter->erp_ready_sem); 79 up(&adapter->erp_ready_sem);
80 zfcp_rec_dbf_event_thread(2, adapter); 80 zfcp_rec_dbf_event_thread("erardy2", adapter);
81} 81}
82 82
83static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act) 83static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
@@ -208,7 +208,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
208 208
209static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, 209static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
210 struct zfcp_port *port, 210 struct zfcp_port *port,
211 struct zfcp_unit *unit, u8 id, void *ref) 211 struct zfcp_unit *unit, char *id, void *ref)
212{ 212{
213 int retval = 1, need; 213 int retval = 1, need;
214 struct zfcp_erp_action *act = NULL; 214 struct zfcp_erp_action *act = NULL;
@@ -228,7 +228,7 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
228 ++adapter->erp_total_count; 228 ++adapter->erp_total_count;
229 list_add_tail(&act->list, &adapter->erp_ready_head); 229 list_add_tail(&act->list, &adapter->erp_ready_head);
230 up(&adapter->erp_ready_sem); 230 up(&adapter->erp_ready_sem);
231 zfcp_rec_dbf_event_thread(1, adapter); 231 zfcp_rec_dbf_event_thread("eracte1", adapter);
232 retval = 0; 232 retval = 0;
233 out: 233 out:
234 zfcp_rec_dbf_event_trigger(id, ref, want, need, act, 234 zfcp_rec_dbf_event_trigger(id, ref, want, need, act,
@@ -237,13 +237,14 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
237} 237}
238 238
239static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, 239static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
240 int clear_mask, u8 id, void *ref) 240 int clear_mask, char *id, void *ref)
241{ 241{
242 zfcp_erp_adapter_block(adapter, clear_mask); 242 zfcp_erp_adapter_block(adapter, clear_mask);
243 zfcp_scsi_schedule_rports_block(adapter);
243 244
244 /* ensure propagation of failed status to new devices */ 245 /* ensure propagation of failed status to new devices */
245 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { 246 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
246 zfcp_erp_adapter_failed(adapter, 13, NULL); 247 zfcp_erp_adapter_failed(adapter, "erareo1", NULL);
247 return -EIO; 248 return -EIO;
248 } 249 }
249 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, 250 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
@@ -258,7 +259,7 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
258 * @ref: Reference for debug trace event. 259 * @ref: Reference for debug trace event.
259 */ 260 */
260void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, 261void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
261 u8 id, void *ref) 262 char *id, void *ref)
262{ 263{
263 unsigned long flags; 264 unsigned long flags;
264 265
@@ -277,7 +278,7 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
277 * @ref: Reference for debug trace event. 278 * @ref: Reference for debug trace event.
278 */ 279 */
279void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear, 280void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
280 u8 id, void *ref) 281 char *id, void *ref)
281{ 282{
282 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; 283 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
283 zfcp_erp_adapter_reopen(adapter, clear | flags, id, ref); 284 zfcp_erp_adapter_reopen(adapter, clear | flags, id, ref);
@@ -290,7 +291,8 @@ void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
290 * @id: Id for debug trace event. 291 * @id: Id for debug trace event.
291 * @ref: Reference for debug trace event. 292 * @ref: Reference for debug trace event.
292 */ 293 */
293void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, u8 id, void *ref) 294void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id,
295 void *ref)
294{ 296{
295 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; 297 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
296 zfcp_erp_port_reopen(port, clear | flags, id, ref); 298 zfcp_erp_port_reopen(port, clear | flags, id, ref);
@@ -303,7 +305,8 @@ void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, u8 id, void *ref)
303 * @id: Id for debug trace event. 305 * @id: Id for debug trace event.
304 * @ref: Reference for debug trace event. 306 * @ref: Reference for debug trace event.
305 */ 307 */
306void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, u8 id, void *ref) 308void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, char *id,
309 void *ref)
307{ 310{
308 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; 311 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
309 zfcp_erp_unit_reopen(unit, clear | flags, id, ref); 312 zfcp_erp_unit_reopen(unit, clear | flags, id, ref);
@@ -311,15 +314,16 @@ void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, u8 id, void *ref)
311 314
312static void zfcp_erp_port_block(struct zfcp_port *port, int clear) 315static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
313{ 316{
314 zfcp_erp_modify_port_status(port, 17, NULL, 317 zfcp_erp_modify_port_status(port, "erpblk1", NULL,
315 ZFCP_STATUS_COMMON_UNBLOCKED | clear, 318 ZFCP_STATUS_COMMON_UNBLOCKED | clear,
316 ZFCP_CLEAR); 319 ZFCP_CLEAR);
317} 320}
318 321
319static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, 322static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
320 int clear, u8 id, void *ref) 323 int clear, char *id, void *ref)
321{ 324{
322 zfcp_erp_port_block(port, clear); 325 zfcp_erp_port_block(port, clear);
326 zfcp_scsi_schedule_rport_block(port);
323 327
324 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) 328 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
325 return; 329 return;
@@ -334,7 +338,7 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
334 * @id: Id for debug trace event. 338 * @id: Id for debug trace event.
335 * @ref: Reference for debug trace event. 339 * @ref: Reference for debug trace event.
336 */ 340 */
337void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, u8 id, 341void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id,
338 void *ref) 342 void *ref)
339{ 343{
340 unsigned long flags; 344 unsigned long flags;
@@ -347,14 +351,15 @@ void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, u8 id,
347 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 351 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
348} 352}
349 353
350static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, u8 id, 354static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
351 void *ref) 355 void *ref)
352{ 356{
353 zfcp_erp_port_block(port, clear); 357 zfcp_erp_port_block(port, clear);
358 zfcp_scsi_schedule_rport_block(port);
354 359
355 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { 360 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
356 /* ensure propagation of failed status to new devices */ 361 /* ensure propagation of failed status to new devices */
357 zfcp_erp_port_failed(port, 14, NULL); 362 zfcp_erp_port_failed(port, "erpreo1", NULL);
358 return -EIO; 363 return -EIO;
359 } 364 }
360 365
@@ -369,7 +374,7 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, u8 id,
369 * 374 *
370 * Returns 0 if recovery has been triggered, < 0 if not. 375 * Returns 0 if recovery has been triggered, < 0 if not.
371 */ 376 */
372int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, u8 id, void *ref) 377int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref)
373{ 378{
374 unsigned long flags; 379 unsigned long flags;
375 int retval; 380 int retval;
@@ -386,12 +391,12 @@ int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, u8 id, void *ref)
386 391
387static void zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask) 392static void zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask)
388{ 393{
389 zfcp_erp_modify_unit_status(unit, 19, NULL, 394 zfcp_erp_modify_unit_status(unit, "erublk1", NULL,
390 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask, 395 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
391 ZFCP_CLEAR); 396 ZFCP_CLEAR);
392} 397}
393 398
394static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, u8 id, 399static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id,
395 void *ref) 400 void *ref)
396{ 401{
397 struct zfcp_adapter *adapter = unit->port->adapter; 402 struct zfcp_adapter *adapter = unit->port->adapter;
@@ -411,7 +416,8 @@ static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, u8 id,
411 * @clear_mask: specifies flags in unit status to be cleared 416 * @clear_mask: specifies flags in unit status to be cleared
412 * Return: 0 on success, < 0 on error 417 * Return: 0 on success, < 0 on error
413 */ 418 */
414void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, u8 id, void *ref) 419void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id,
420 void *ref)
415{ 421{
416 unsigned long flags; 422 unsigned long flags;
417 struct zfcp_port *port = unit->port; 423 struct zfcp_port *port = unit->port;
@@ -437,28 +443,28 @@ static int status_change_clear(unsigned long mask, atomic_t *status)
437static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) 443static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
438{ 444{
439 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) 445 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
440 zfcp_rec_dbf_event_adapter(16, NULL, adapter); 446 zfcp_rec_dbf_event_adapter("eraubl1", NULL, adapter);
441 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); 447 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
442} 448}
443 449
444static void zfcp_erp_port_unblock(struct zfcp_port *port) 450static void zfcp_erp_port_unblock(struct zfcp_port *port)
445{ 451{
446 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) 452 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
447 zfcp_rec_dbf_event_port(18, NULL, port); 453 zfcp_rec_dbf_event_port("erpubl1", NULL, port);
448 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); 454 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
449} 455}
450 456
451static void zfcp_erp_unit_unblock(struct zfcp_unit *unit) 457static void zfcp_erp_unit_unblock(struct zfcp_unit *unit)
452{ 458{
453 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status)) 459 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status))
454 zfcp_rec_dbf_event_unit(20, NULL, unit); 460 zfcp_rec_dbf_event_unit("eruubl1", NULL, unit);
455 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status); 461 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status);
456} 462}
457 463
458static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) 464static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
459{ 465{
460 list_move(&erp_action->list, &erp_action->adapter->erp_running_head); 466 list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
461 zfcp_rec_dbf_event_action(145, erp_action); 467 zfcp_rec_dbf_event_action("erator1", erp_action);
462} 468}
463 469
464static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) 470static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
@@ -474,11 +480,11 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
474 if (act->status & (ZFCP_STATUS_ERP_DISMISSED | 480 if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
475 ZFCP_STATUS_ERP_TIMEDOUT)) { 481 ZFCP_STATUS_ERP_TIMEDOUT)) {
476 act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 482 act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
477 zfcp_rec_dbf_event_action(142, act); 483 zfcp_rec_dbf_event_action("erscf_1", act);
478 act->fsf_req->erp_action = NULL; 484 act->fsf_req->erp_action = NULL;
479 } 485 }
480 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) 486 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
481 zfcp_rec_dbf_event_action(143, act); 487 zfcp_rec_dbf_event_action("erscf_2", act);
482 if (act->fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED | 488 if (act->fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED |
483 ZFCP_STATUS_FSFREQ_DISMISSED)) 489 ZFCP_STATUS_FSFREQ_DISMISSED))
484 act->fsf_req = NULL; 490 act->fsf_req = NULL;
@@ -530,7 +536,7 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
530} 536}
531 537
532static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, 538static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
533 int clear, u8 id, void *ref) 539 int clear, char *id, void *ref)
534{ 540{
535 struct zfcp_port *port; 541 struct zfcp_port *port;
536 542
@@ -538,8 +544,8 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
538 _zfcp_erp_port_reopen(port, clear, id, ref); 544 _zfcp_erp_port_reopen(port, clear, id, ref);
539} 545}
540 546
541static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, u8 id, 547static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear,
542 void *ref) 548 char *id, void *ref)
543{ 549{
544 struct zfcp_unit *unit; 550 struct zfcp_unit *unit;
545 551
@@ -559,28 +565,28 @@ static void zfcp_erp_strategy_followup_actions(struct zfcp_erp_action *act)
559 565
560 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 566 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
561 if (status == ZFCP_ERP_SUCCEEDED) 567 if (status == ZFCP_ERP_SUCCEEDED)
562 _zfcp_erp_port_reopen_all(adapter, 0, 70, NULL); 568 _zfcp_erp_port_reopen_all(adapter, 0, "ersfa_1", NULL);
563 else 569 else
564 _zfcp_erp_adapter_reopen(adapter, 0, 71, NULL); 570 _zfcp_erp_adapter_reopen(adapter, 0, "ersfa_2", NULL);
565 break; 571 break;
566 572
567 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 573 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
568 if (status == ZFCP_ERP_SUCCEEDED) 574 if (status == ZFCP_ERP_SUCCEEDED)
569 _zfcp_erp_port_reopen(port, 0, 72, NULL); 575 _zfcp_erp_port_reopen(port, 0, "ersfa_3", NULL);
570 else 576 else
571 _zfcp_erp_adapter_reopen(adapter, 0, 73, NULL); 577 _zfcp_erp_adapter_reopen(adapter, 0, "ersfa_4", NULL);
572 break; 578 break;
573 579
574 case ZFCP_ERP_ACTION_REOPEN_PORT: 580 case ZFCP_ERP_ACTION_REOPEN_PORT:
575 if (status == ZFCP_ERP_SUCCEEDED) 581 if (status == ZFCP_ERP_SUCCEEDED)
576 _zfcp_erp_unit_reopen_all(port, 0, 74, NULL); 582 _zfcp_erp_unit_reopen_all(port, 0, "ersfa_5", NULL);
577 else 583 else
578 _zfcp_erp_port_forced_reopen(port, 0, 75, NULL); 584 _zfcp_erp_port_forced_reopen(port, 0, "ersfa_6", NULL);
579 break; 585 break;
580 586
581 case ZFCP_ERP_ACTION_REOPEN_UNIT: 587 case ZFCP_ERP_ACTION_REOPEN_UNIT:
582 if (status != ZFCP_ERP_SUCCEEDED) 588 if (status != ZFCP_ERP_SUCCEEDED)
583 _zfcp_erp_port_reopen(unit->port, 0, 76, NULL); 589 _zfcp_erp_port_reopen(unit->port, 0, "ersfa_7", NULL);
584 break; 590 break;
585 } 591 }
586} 592}
@@ -617,7 +623,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
617 adapter->peer_d_id); 623 adapter->peer_d_id);
618 if (IS_ERR(port)) /* error or port already attached */ 624 if (IS_ERR(port)) /* error or port already attached */
619 return; 625 return;
620 _zfcp_erp_port_reopen(port, 0, 150, NULL); 626 _zfcp_erp_port_reopen(port, 0, "ereptp1", NULL);
621} 627}
622 628
623static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) 629static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
@@ -640,9 +646,9 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
640 return ZFCP_ERP_FAILED; 646 return ZFCP_ERP_FAILED;
641 } 647 }
642 648
643 zfcp_rec_dbf_event_thread_lock(6, adapter); 649 zfcp_rec_dbf_event_thread_lock("erasfx1", adapter);
644 down(&adapter->erp_ready_sem); 650 down(&adapter->erp_ready_sem);
645 zfcp_rec_dbf_event_thread_lock(7, adapter); 651 zfcp_rec_dbf_event_thread_lock("erasfx2", adapter);
646 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) 652 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
647 break; 653 break;
648 654
@@ -681,9 +687,9 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
681 if (ret) 687 if (ret)
682 return ZFCP_ERP_FAILED; 688 return ZFCP_ERP_FAILED;
683 689
684 zfcp_rec_dbf_event_thread_lock(8, adapter); 690 zfcp_rec_dbf_event_thread_lock("erasox1", adapter);
685 down(&adapter->erp_ready_sem); 691 down(&adapter->erp_ready_sem);
686 zfcp_rec_dbf_event_thread_lock(9, adapter); 692 zfcp_rec_dbf_event_thread_lock("erasox2", adapter);
687 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) 693 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
688 return ZFCP_ERP_FAILED; 694 return ZFCP_ERP_FAILED;
689 695
@@ -705,60 +711,59 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
705 return ZFCP_ERP_SUCCEEDED; 711 return ZFCP_ERP_SUCCEEDED;
706} 712}
707 713
708static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act, 714static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
709 int close)
710{ 715{
711 int retval = ZFCP_ERP_SUCCEEDED;
712 struct zfcp_adapter *adapter = act->adapter; 716 struct zfcp_adapter *adapter = act->adapter;
713 717
714 if (close)
715 goto close_only;
716
717 retval = zfcp_erp_adapter_strategy_open_qdio(act);
718 if (retval != ZFCP_ERP_SUCCEEDED)
719 goto failed_qdio;
720
721 retval = zfcp_erp_adapter_strategy_open_fsf(act);
722 if (retval != ZFCP_ERP_SUCCEEDED)
723 goto failed_openfcp;
724
725 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &act->adapter->status);
726
727 return ZFCP_ERP_SUCCEEDED;
728
729 close_only:
730 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
731 &act->adapter->status);
732
733 failed_openfcp:
734 /* close queues to ensure that buffers are not accessed by adapter */ 718 /* close queues to ensure that buffers are not accessed by adapter */
735 zfcp_qdio_close(adapter); 719 zfcp_qdio_close(adapter);
736 zfcp_fsf_req_dismiss_all(adapter); 720 zfcp_fsf_req_dismiss_all(adapter);
737 adapter->fsf_req_seq_no = 0; 721 adapter->fsf_req_seq_no = 0;
738 /* all ports and units are closed */ 722 /* all ports and units are closed */
739 zfcp_erp_modify_adapter_status(adapter, 24, NULL, 723 zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL,
740 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); 724 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
741 failed_qdio: 725
742 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 726 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
743 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 727 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
744 &act->adapter->status);
745 return retval;
746} 728}
747 729
748static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act) 730static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
749{ 731{
750 int retval; 732 struct zfcp_adapter *adapter = act->adapter;
751 733
752 zfcp_erp_adapter_strategy_generic(act, 1); /* close */ 734 if (zfcp_erp_adapter_strategy_open_qdio(act)) {
753 if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY) 735 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
754 return ZFCP_ERP_EXIT; 736 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
737 &adapter->status);
738 return ZFCP_ERP_FAILED;
739 }
740
741 if (zfcp_erp_adapter_strategy_open_fsf(act)) {
742 zfcp_erp_adapter_strategy_close(act);
743 return ZFCP_ERP_FAILED;
744 }
745
746 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
747
748 return ZFCP_ERP_SUCCEEDED;
749}
755 750
756 retval = zfcp_erp_adapter_strategy_generic(act, 0); /* open */ 751static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
752{
753 struct zfcp_adapter *adapter = act->adapter;
757 754
758 if (retval == ZFCP_ERP_FAILED) 755 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN) {
756 zfcp_erp_adapter_strategy_close(act);
757 if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
758 return ZFCP_ERP_EXIT;
759 }
760
761 if (zfcp_erp_adapter_strategy_open(act)) {
759 ssleep(8); 762 ssleep(8);
763 return ZFCP_ERP_FAILED;
764 }
760 765
761 return retval; 766 return ZFCP_ERP_SUCCEEDED;
762} 767}
763 768
764static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act) 769static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
@@ -777,10 +782,7 @@ static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
777 782
778static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port) 783static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
779{ 784{
780 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 785 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, &port->status);
781 ZFCP_STATUS_PORT_PHYS_CLOSING |
782 ZFCP_STATUS_PORT_INVALID_WWPN,
783 &port->status);
784} 786}
785 787
786static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action) 788static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
@@ -836,7 +838,7 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
836 struct zfcp_port *port = act->port; 838 struct zfcp_port *port = act->port;
837 839
838 if (port->wwpn != adapter->peer_wwpn) { 840 if (port->wwpn != adapter->peer_wwpn) {
839 zfcp_erp_port_failed(port, 25, NULL); 841 zfcp_erp_port_failed(port, "eroptp1", NULL);
840 return ZFCP_ERP_FAILED; 842 return ZFCP_ERP_FAILED;
841 } 843 }
842 port->d_id = adapter->peer_d_id; 844 port->d_id = adapter->peer_d_id;
@@ -855,7 +857,7 @@ void zfcp_erp_port_strategy_open_lookup(struct work_struct *work)
855 port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP; 857 port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
856 if (retval) 858 if (retval)
857 zfcp_erp_notify(&port->erp_action, ZFCP_ERP_FAILED); 859 zfcp_erp_notify(&port->erp_action, ZFCP_ERP_FAILED);
858 860 zfcp_port_put(port);
859} 861}
860 862
861static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) 863static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
@@ -871,17 +873,15 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
871 if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP) 873 if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
872 return zfcp_erp_open_ptp_port(act); 874 return zfcp_erp_open_ptp_port(act);
873 if (!port->d_id) { 875 if (!port->d_id) {
874 queue_work(zfcp_data.work_queue, &port->gid_pn_work); 876 zfcp_port_get(port);
877 if (!queue_work(zfcp_data.work_queue,
878 &port->gid_pn_work))
879 zfcp_port_put(port);
875 return ZFCP_ERP_CONTINUES; 880 return ZFCP_ERP_CONTINUES;
876 } 881 }
877 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP: 882 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
878 if (!port->d_id) { 883 if (!port->d_id)
879 if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) {
880 zfcp_erp_port_failed(port, 26, NULL);
881 return ZFCP_ERP_EXIT;
882 }
883 return ZFCP_ERP_FAILED; 884 return ZFCP_ERP_FAILED;
884 }
885 return zfcp_erp_port_strategy_open_port(act); 885 return zfcp_erp_port_strategy_open_port(act);
886 886
887 case ZFCP_ERP_STEP_PORT_OPENING: 887 case ZFCP_ERP_STEP_PORT_OPENING:
@@ -995,7 +995,7 @@ static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
995 "port 0x%016Lx\n", 995 "port 0x%016Lx\n",
996 (unsigned long long)unit->fcp_lun, 996 (unsigned long long)unit->fcp_lun,
997 (unsigned long long)unit->port->wwpn); 997 (unsigned long long)unit->port->wwpn);
998 zfcp_erp_unit_failed(unit, 21, NULL); 998 zfcp_erp_unit_failed(unit, "erusck1", NULL);
999 } 999 }
1000 break; 1000 break;
1001 } 1001 }
@@ -1025,7 +1025,7 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
1025 dev_err(&port->adapter->ccw_device->dev, 1025 dev_err(&port->adapter->ccw_device->dev,
1026 "ERP failed for remote port 0x%016Lx\n", 1026 "ERP failed for remote port 0x%016Lx\n",
1027 (unsigned long long)port->wwpn); 1027 (unsigned long long)port->wwpn);
1028 zfcp_erp_port_failed(port, 22, NULL); 1028 zfcp_erp_port_failed(port, "erpsck1", NULL);
1029 } 1029 }
1030 break; 1030 break;
1031 } 1031 }
@@ -1052,7 +1052,7 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
1052 dev_err(&adapter->ccw_device->dev, 1052 dev_err(&adapter->ccw_device->dev,
1053 "ERP cannot recover an error " 1053 "ERP cannot recover an error "
1054 "on the FCP device\n"); 1054 "on the FCP device\n");
1055 zfcp_erp_adapter_failed(adapter, 23, NULL); 1055 zfcp_erp_adapter_failed(adapter, "erasck1", NULL);
1056 } 1056 }
1057 break; 1057 break;
1058 } 1058 }
@@ -1117,7 +1117,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1117 if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) { 1117 if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
1118 _zfcp_erp_adapter_reopen(adapter, 1118 _zfcp_erp_adapter_reopen(adapter,
1119 ZFCP_STATUS_COMMON_ERP_FAILED, 1119 ZFCP_STATUS_COMMON_ERP_FAILED,
1120 67, NULL); 1120 "ersscg1", NULL);
1121 return ZFCP_ERP_EXIT; 1121 return ZFCP_ERP_EXIT;
1122 } 1122 }
1123 break; 1123 break;
@@ -1127,7 +1127,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1127 if (zfcp_erp_strat_change_det(&port->status, erp_status)) { 1127 if (zfcp_erp_strat_change_det(&port->status, erp_status)) {
1128 _zfcp_erp_port_reopen(port, 1128 _zfcp_erp_port_reopen(port,
1129 ZFCP_STATUS_COMMON_ERP_FAILED, 1129 ZFCP_STATUS_COMMON_ERP_FAILED,
1130 68, NULL); 1130 "ersscg2", NULL);
1131 return ZFCP_ERP_EXIT; 1131 return ZFCP_ERP_EXIT;
1132 } 1132 }
1133 break; 1133 break;
@@ -1136,7 +1136,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1136 if (zfcp_erp_strat_change_det(&unit->status, erp_status)) { 1136 if (zfcp_erp_strat_change_det(&unit->status, erp_status)) {
1137 _zfcp_erp_unit_reopen(unit, 1137 _zfcp_erp_unit_reopen(unit,
1138 ZFCP_STATUS_COMMON_ERP_FAILED, 1138 ZFCP_STATUS_COMMON_ERP_FAILED,
1139 69, NULL); 1139 "ersscg3", NULL);
1140 return ZFCP_ERP_EXIT; 1140 return ZFCP_ERP_EXIT;
1141 } 1141 }
1142 break; 1142 break;
@@ -1155,7 +1155,7 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
1155 } 1155 }
1156 1156
1157 list_del(&erp_action->list); 1157 list_del(&erp_action->list);
1158 zfcp_rec_dbf_event_action(144, erp_action); 1158 zfcp_rec_dbf_event_action("eractd1", erp_action);
1159 1159
1160 switch (erp_action->action) { 1160 switch (erp_action->action) {
1161 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1161 case ZFCP_ERP_ACTION_REOPEN_UNIT:
@@ -1214,38 +1214,8 @@ static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
1214 atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status); 1214 atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1215 INIT_WORK(&p->work, zfcp_erp_scsi_scan); 1215 INIT_WORK(&p->work, zfcp_erp_scsi_scan);
1216 p->unit = unit; 1216 p->unit = unit;
1217 queue_work(zfcp_data.work_queue, &p->work); 1217 if (!queue_work(zfcp_data.work_queue, &p->work))
1218} 1218 zfcp_unit_put(unit);
1219
1220static void zfcp_erp_rport_register(struct zfcp_port *port)
1221{
1222 struct fc_rport_identifiers ids;
1223 ids.node_name = port->wwnn;
1224 ids.port_name = port->wwpn;
1225 ids.port_id = port->d_id;
1226 ids.roles = FC_RPORT_ROLE_FCP_TARGET;
1227 port->rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
1228 if (!port->rport) {
1229 dev_err(&port->adapter->ccw_device->dev,
1230 "Registering port 0x%016Lx failed\n",
1231 (unsigned long long)port->wwpn);
1232 return;
1233 }
1234
1235 scsi_target_unblock(&port->rport->dev);
1236 port->rport->maxframe_size = port->maxframe_size;
1237 port->rport->supported_classes = port->supported_classes;
1238}
1239
1240static void zfcp_erp_rports_del(struct zfcp_adapter *adapter)
1241{
1242 struct zfcp_port *port;
1243 list_for_each_entry(port, &adapter->port_list_head, list) {
1244 if (!port->rport)
1245 continue;
1246 fc_remote_port_delete(port->rport);
1247 port->rport = NULL;
1248 }
1249} 1219}
1250 1220
1251static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) 1221static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
@@ -1256,10 +1226,8 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1256 1226
1257 switch (act->action) { 1227 switch (act->action) {
1258 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1228 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1259 if ((result == ZFCP_ERP_SUCCEEDED) && 1229 flush_work(&port->rport_work);
1260 !unit->device && port->rport) { 1230 if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
1261 atomic_set_mask(ZFCP_STATUS_UNIT_REGISTERED,
1262 &unit->status);
1263 if (!(atomic_read(&unit->status) & 1231 if (!(atomic_read(&unit->status) &
1264 ZFCP_STATUS_UNIT_SCSI_WORK_PENDING)) 1232 ZFCP_STATUS_UNIT_SCSI_WORK_PENDING))
1265 zfcp_erp_schedule_work(unit); 1233 zfcp_erp_schedule_work(unit);
@@ -1269,27 +1237,17 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1269 1237
1270 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1238 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1271 case ZFCP_ERP_ACTION_REOPEN_PORT: 1239 case ZFCP_ERP_ACTION_REOPEN_PORT:
1272 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_NO_WWPN) { 1240 if (result == ZFCP_ERP_SUCCEEDED)
1273 zfcp_port_put(port); 1241 zfcp_scsi_schedule_rport_register(port);
1274 return;
1275 }
1276 if ((result == ZFCP_ERP_SUCCEEDED) && !port->rport)
1277 zfcp_erp_rport_register(port);
1278 if ((result != ZFCP_ERP_SUCCEEDED) && port->rport) {
1279 fc_remote_port_delete(port->rport);
1280 port->rport = NULL;
1281 }
1282 zfcp_port_put(port); 1242 zfcp_port_put(port);
1283 break; 1243 break;
1284 1244
1285 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1245 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1286 if (result != ZFCP_ERP_SUCCEEDED) { 1246 if (result == ZFCP_ERP_SUCCEEDED) {
1287 unregister_service_level(&adapter->service_level);
1288 zfcp_erp_rports_del(adapter);
1289 } else {
1290 register_service_level(&adapter->service_level); 1247 register_service_level(&adapter->service_level);
1291 schedule_work(&adapter->scan_work); 1248 schedule_work(&adapter->scan_work);
1292 } 1249 } else
1250 unregister_service_level(&adapter->service_level);
1293 zfcp_adapter_put(adapter); 1251 zfcp_adapter_put(adapter);
1294 break; 1252 break;
1295 } 1253 }
@@ -1346,7 +1304,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1346 erp_action->status |= ZFCP_STATUS_ERP_LOWMEM; 1304 erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
1347 } 1305 }
1348 if (adapter->erp_total_count == adapter->erp_low_mem_count) 1306 if (adapter->erp_total_count == adapter->erp_low_mem_count)
1349 _zfcp_erp_adapter_reopen(adapter, 0, 66, NULL); 1307 _zfcp_erp_adapter_reopen(adapter, 0, "erstgy1", NULL);
1350 else { 1308 else {
1351 zfcp_erp_strategy_memwait(erp_action); 1309 zfcp_erp_strategy_memwait(erp_action);
1352 retval = ZFCP_ERP_CONTINUES; 1310 retval = ZFCP_ERP_CONTINUES;
@@ -1406,9 +1364,9 @@ static int zfcp_erp_thread(void *data)
1406 zfcp_erp_wakeup(adapter); 1364 zfcp_erp_wakeup(adapter);
1407 } 1365 }
1408 1366
1409 zfcp_rec_dbf_event_thread_lock(4, adapter); 1367 zfcp_rec_dbf_event_thread_lock("erthrd1", adapter);
1410 ignore = down_interruptible(&adapter->erp_ready_sem); 1368 ignore = down_interruptible(&adapter->erp_ready_sem);
1411 zfcp_rec_dbf_event_thread_lock(5, adapter); 1369 zfcp_rec_dbf_event_thread_lock("erthrd2", adapter);
1412 } 1370 }
1413 1371
1414 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); 1372 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
@@ -1453,7 +1411,7 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
1453{ 1411{
1454 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status); 1412 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
1455 up(&adapter->erp_ready_sem); 1413 up(&adapter->erp_ready_sem);
1456 zfcp_rec_dbf_event_thread_lock(3, adapter); 1414 zfcp_rec_dbf_event_thread_lock("erthrk1", adapter);
1457 1415
1458 wait_event(adapter->erp_thread_wqh, 1416 wait_event(adapter->erp_thread_wqh,
1459 !(atomic_read(&adapter->status) & 1417 !(atomic_read(&adapter->status) &
@@ -1469,7 +1427,7 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
1469 * @id: Event id for debug trace. 1427 * @id: Event id for debug trace.
1470 * @ref: Reference for debug trace. 1428 * @ref: Reference for debug trace.
1471 */ 1429 */
1472void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref) 1430void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, char *id, void *ref)
1473{ 1431{
1474 zfcp_erp_modify_adapter_status(adapter, id, ref, 1432 zfcp_erp_modify_adapter_status(adapter, id, ref,
1475 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); 1433 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
@@ -1481,7 +1439,7 @@ void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref)
1481 * @id: Event id for debug trace. 1439 * @id: Event id for debug trace.
1482 * @ref: Reference for debug trace. 1440 * @ref: Reference for debug trace.
1483 */ 1441 */
1484void zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref) 1442void zfcp_erp_port_failed(struct zfcp_port *port, char *id, void *ref)
1485{ 1443{
1486 zfcp_erp_modify_port_status(port, id, ref, 1444 zfcp_erp_modify_port_status(port, id, ref,
1487 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); 1445 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
@@ -1493,7 +1451,7 @@ void zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref)
1493 * @id: Event id for debug trace. 1451 * @id: Event id for debug trace.
1494 * @ref: Reference for debug trace. 1452 * @ref: Reference for debug trace.
1495 */ 1453 */
1496void zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref) 1454void zfcp_erp_unit_failed(struct zfcp_unit *unit, char *id, void *ref)
1497{ 1455{
1498 zfcp_erp_modify_unit_status(unit, id, ref, 1456 zfcp_erp_modify_unit_status(unit, id, ref,
1499 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); 1457 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
@@ -1520,7 +1478,7 @@ void zfcp_erp_wait(struct zfcp_adapter *adapter)
1520 * 1478 *
1521 * Changes in common status bits are propagated to attached ports and units. 1479 * Changes in common status bits are propagated to attached ports and units.
1522 */ 1480 */
1523void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, u8 id, 1481void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id,
1524 void *ref, u32 mask, int set_or_clear) 1482 void *ref, u32 mask, int set_or_clear)
1525{ 1483{
1526 struct zfcp_port *port; 1484 struct zfcp_port *port;
@@ -1554,7 +1512,7 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, u8 id,
1554 * 1512 *
1555 * Changes in common status bits are propagated to attached units. 1513 * Changes in common status bits are propagated to attached units.
1556 */ 1514 */
1557void zfcp_erp_modify_port_status(struct zfcp_port *port, u8 id, void *ref, 1515void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref,
1558 u32 mask, int set_or_clear) 1516 u32 mask, int set_or_clear)
1559{ 1517{
1560 struct zfcp_unit *unit; 1518 struct zfcp_unit *unit;
@@ -1586,7 +1544,7 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, u8 id, void *ref,
1586 * @mask: status bits to change 1544 * @mask: status bits to change
1587 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR 1545 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
1588 */ 1546 */
1589void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u8 id, void *ref, 1547void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref,
1590 u32 mask, int set_or_clear) 1548 u32 mask, int set_or_clear)
1591{ 1549{
1592 if (set_or_clear == ZFCP_SET) { 1550 if (set_or_clear == ZFCP_SET) {
@@ -1609,7 +1567,7 @@ void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u8 id, void *ref,
1609 * @id: The debug trace id. 1567 * @id: The debug trace id.
1610 * @id: Reference for the debug trace. 1568 * @id: Reference for the debug trace.
1611 */ 1569 */
1612void zfcp_erp_port_boxed(struct zfcp_port *port, u8 id, void *ref) 1570void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref)
1613{ 1571{
1614 unsigned long flags; 1572 unsigned long flags;
1615 1573
@@ -1626,7 +1584,7 @@ void zfcp_erp_port_boxed(struct zfcp_port *port, u8 id, void *ref)
1626 * @id: The debug trace id. 1584 * @id: The debug trace id.
1627 * @id: Reference for the debug trace. 1585 * @id: Reference for the debug trace.
1628 */ 1586 */
1629void zfcp_erp_unit_boxed(struct zfcp_unit *unit, u8 id, void *ref) 1587void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref)
1630{ 1588{
1631 zfcp_erp_modify_unit_status(unit, id, ref, 1589 zfcp_erp_modify_unit_status(unit, id, ref,
1632 ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); 1590 ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET);
@@ -1642,7 +1600,7 @@ void zfcp_erp_unit_boxed(struct zfcp_unit *unit, u8 id, void *ref)
1642 * Since the adapter has denied access, stop using the port and the 1600 * Since the adapter has denied access, stop using the port and the
1643 * attached units. 1601 * attached units.
1644 */ 1602 */
1645void zfcp_erp_port_access_denied(struct zfcp_port *port, u8 id, void *ref) 1603void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref)
1646{ 1604{
1647 unsigned long flags; 1605 unsigned long flags;
1648 1606
@@ -1661,14 +1619,14 @@ void zfcp_erp_port_access_denied(struct zfcp_port *port, u8 id, void *ref)
1661 * 1619 *
1662 * Since the adapter has denied access, stop using the unit. 1620 * Since the adapter has denied access, stop using the unit.
1663 */ 1621 */
1664void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, u8 id, void *ref) 1622void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, char *id, void *ref)
1665{ 1623{
1666 zfcp_erp_modify_unit_status(unit, id, ref, 1624 zfcp_erp_modify_unit_status(unit, id, ref,
1667 ZFCP_STATUS_COMMON_ERP_FAILED | 1625 ZFCP_STATUS_COMMON_ERP_FAILED |
1668 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); 1626 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
1669} 1627}
1670 1628
1671static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, u8 id, 1629static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, char *id,
1672 void *ref) 1630 void *ref)
1673{ 1631{
1674 int status = atomic_read(&unit->status); 1632 int status = atomic_read(&unit->status);
@@ -1679,7 +1637,7 @@ static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, u8 id,
1679 zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); 1637 zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
1680} 1638}
1681 1639
1682static void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id, 1640static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id,
1683 void *ref) 1641 void *ref)
1684{ 1642{
1685 struct zfcp_unit *unit; 1643 struct zfcp_unit *unit;
@@ -1701,7 +1659,7 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id,
1701 * @id: Id for debug trace 1659 * @id: Id for debug trace
1702 * @ref: Reference for debug trace 1660 * @ref: Reference for debug trace
1703 */ 1661 */
1704void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id, 1662void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id,
1705 void *ref) 1663 void *ref)
1706{ 1664{
1707 struct zfcp_port *port; 1665 struct zfcp_port *port;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index b5adeda93e1d..f6399ca97bcb 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * External function declarations. 4 * External function declarations.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2008 6 * Copyright IBM Corporation 2002, 2009
7 */ 7 */
8 8
9#ifndef ZFCP_EXT_H 9#ifndef ZFCP_EXT_H
@@ -35,15 +35,15 @@ extern struct miscdevice zfcp_cfdc_misc;
35/* zfcp_dbf.c */ 35/* zfcp_dbf.c */
36extern int zfcp_adapter_debug_register(struct zfcp_adapter *); 36extern int zfcp_adapter_debug_register(struct zfcp_adapter *);
37extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *); 37extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *);
38extern void zfcp_rec_dbf_event_thread(u8, struct zfcp_adapter *); 38extern void zfcp_rec_dbf_event_thread(char *, struct zfcp_adapter *);
39extern void zfcp_rec_dbf_event_thread_lock(u8, struct zfcp_adapter *); 39extern void zfcp_rec_dbf_event_thread_lock(char *, struct zfcp_adapter *);
40extern void zfcp_rec_dbf_event_adapter(u8, void *, struct zfcp_adapter *); 40extern void zfcp_rec_dbf_event_adapter(char *, void *, struct zfcp_adapter *);
41extern void zfcp_rec_dbf_event_port(u8, void *, struct zfcp_port *); 41extern void zfcp_rec_dbf_event_port(char *, void *, struct zfcp_port *);
42extern void zfcp_rec_dbf_event_unit(u8, void *, struct zfcp_unit *); 42extern void zfcp_rec_dbf_event_unit(char *, void *, struct zfcp_unit *);
43extern void zfcp_rec_dbf_event_trigger(u8, void *, u8, u8, void *, 43extern void zfcp_rec_dbf_event_trigger(char *, void *, u8, u8, void *,
44 struct zfcp_adapter *, 44 struct zfcp_adapter *,
45 struct zfcp_port *, struct zfcp_unit *); 45 struct zfcp_port *, struct zfcp_unit *);
46extern void zfcp_rec_dbf_event_action(u8, struct zfcp_erp_action *); 46extern void zfcp_rec_dbf_event_action(char *, struct zfcp_erp_action *);
47extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *); 47extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
48extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *, 48extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
49 struct fsf_status_read_buffer *); 49 struct fsf_status_read_buffer *);
@@ -66,31 +66,34 @@ extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
66 struct scsi_cmnd *); 66 struct scsi_cmnd *);
67 67
68/* zfcp_erp.c */ 68/* zfcp_erp.c */
69extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u8, void *, 69extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *,
70 u32, int); 70 void *, u32, int);
71extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, u8, void *); 71extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *);
72extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, u8, void *); 72extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *,
73extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, u8, void *); 73 void *);
74extern void zfcp_erp_modify_port_status(struct zfcp_port *, u8, void *, u32, 74extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, char *, void *);
75extern void zfcp_erp_modify_port_status(struct zfcp_port *, char *, void *, u32,
75 int); 76 int);
76extern int zfcp_erp_port_reopen(struct zfcp_port *, int, u8, void *); 77extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *);
77extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, u8, void *); 78extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *);
78extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, u8, void *); 79extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *,
79extern void zfcp_erp_port_failed(struct zfcp_port *, u8, void *); 80 void *);
80extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u8, void *, u32, 81extern void zfcp_erp_port_failed(struct zfcp_port *, char *, void *);
82extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, char *, void *, u32,
81 int); 83 int);
82extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, u8, void *); 84extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, char *, void *);
83extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, u8, void *); 85extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, char *, void *);
84extern void zfcp_erp_unit_failed(struct zfcp_unit *, u8, void *); 86extern void zfcp_erp_unit_failed(struct zfcp_unit *, char *, void *);
85extern int zfcp_erp_thread_setup(struct zfcp_adapter *); 87extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
86extern void zfcp_erp_thread_kill(struct zfcp_adapter *); 88extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
87extern void zfcp_erp_wait(struct zfcp_adapter *); 89extern void zfcp_erp_wait(struct zfcp_adapter *);
88extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long); 90extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
89extern void zfcp_erp_port_boxed(struct zfcp_port *, u8, void *); 91extern void zfcp_erp_port_boxed(struct zfcp_port *, char *, void *);
90extern void zfcp_erp_unit_boxed(struct zfcp_unit *, u8, void *); 92extern void zfcp_erp_unit_boxed(struct zfcp_unit *, char *, void *);
91extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8, void *); 93extern void zfcp_erp_port_access_denied(struct zfcp_port *, char *, void *);
92extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8, void *); 94extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, char *, void *);
93extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *); 95extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
96 void *);
94extern void zfcp_erp_timeout_handler(unsigned long); 97extern void zfcp_erp_timeout_handler(unsigned long);
95extern void zfcp_erp_port_strategy_open_lookup(struct work_struct *); 98extern void zfcp_erp_port_strategy_open_lookup(struct work_struct *);
96 99
@@ -101,6 +104,7 @@ extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
101extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *); 104extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *);
102extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); 105extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
103extern void zfcp_test_link(struct zfcp_port *); 106extern void zfcp_test_link(struct zfcp_port *);
107extern void zfcp_fc_link_test_work(struct work_struct *);
104extern void zfcp_fc_nameserver_init(struct zfcp_adapter *); 108extern void zfcp_fc_nameserver_init(struct zfcp_adapter *);
105 109
106/* zfcp_fsf.c */ 110/* zfcp_fsf.c */
@@ -125,16 +129,13 @@ extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
125extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *, 129extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
126 struct zfcp_erp_action *); 130 struct zfcp_erp_action *);
127extern int zfcp_fsf_send_els(struct zfcp_send_els *); 131extern int zfcp_fsf_send_els(struct zfcp_send_els *);
128extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *, 132extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *,
129 struct zfcp_unit *, 133 struct scsi_cmnd *);
130 struct scsi_cmnd *, int, int);
131extern void zfcp_fsf_req_complete(struct zfcp_fsf_req *); 134extern void zfcp_fsf_req_complete(struct zfcp_fsf_req *);
132extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); 135extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
133extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *, 136extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8);
134 struct zfcp_unit *, u8, int);
135extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, 137extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
136 struct zfcp_adapter *, 138 struct zfcp_unit *);
137 struct zfcp_unit *, int);
138 139
139/* zfcp_qdio.c */ 140/* zfcp_qdio.c */
140extern int zfcp_qdio_allocate(struct zfcp_adapter *); 141extern int zfcp_qdio_allocate(struct zfcp_adapter *);
@@ -153,6 +154,10 @@ extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
153extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); 154extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
154extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); 155extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
155extern struct fc_function_template zfcp_transport_functions; 156extern struct fc_function_template zfcp_transport_functions;
157extern void zfcp_scsi_rport_work(struct work_struct *);
158extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
159extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
160extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
156 161
157/* zfcp_sysfs.c */ 162/* zfcp_sysfs.c */
158extern struct attribute_group zfcp_sysfs_unit_attrs; 163extern struct attribute_group zfcp_sysfs_unit_attrs;
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index eabdfe24456e..aab8123c5966 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Fibre Channel related functions for the zfcp device driver. 4 * Fibre Channel related functions for the zfcp device driver.
5 * 5 *
6 * Copyright IBM Corporation 2008 6 * Copyright IBM Corporation 2008, 2009
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -98,8 +98,12 @@ static void zfcp_wka_port_offline(struct work_struct *work)
98 struct zfcp_wka_port *wka_port = 98 struct zfcp_wka_port *wka_port =
99 container_of(dw, struct zfcp_wka_port, work); 99 container_of(dw, struct zfcp_wka_port, work);
100 100
101 wait_event(wka_port->completion_wq, 101 /* Don't wait forvever. If the wka_port is too busy take it offline
102 atomic_read(&wka_port->refcount) == 0); 102 through a new call later */
103 if (!wait_event_timeout(wka_port->completion_wq,
104 atomic_read(&wka_port->refcount) == 0,
105 HZ >> 1))
106 return;
103 107
104 mutex_lock(&wka_port->mutex); 108 mutex_lock(&wka_port->mutex);
105 if ((atomic_read(&wka_port->refcount) != 0) || 109 if ((atomic_read(&wka_port->refcount) != 0) ||
@@ -145,16 +149,10 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
145 struct zfcp_port *port; 149 struct zfcp_port *port;
146 150
147 read_lock_irqsave(&zfcp_data.config_lock, flags); 151 read_lock_irqsave(&zfcp_data.config_lock, flags);
148 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { 152 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list)
149 if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_PHYS_OPEN)) 153 if ((port->d_id & range) == (elem->nport_did & range))
150 /* Try to connect to unused ports anyway. */
151 zfcp_erp_port_reopen(port,
152 ZFCP_STATUS_COMMON_ERP_FAILED,
153 82, fsf_req);
154 else if ((port->d_id & range) == (elem->nport_did & range))
155 /* Check connection status for connected ports */
156 zfcp_test_link(port); 154 zfcp_test_link(port);
157 } 155
158 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 156 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
159} 157}
160 158
@@ -196,7 +194,7 @@ static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
196 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 194 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
197 195
198 if (port && (port->wwpn == wwpn)) 196 if (port && (port->wwpn == wwpn))
199 zfcp_erp_port_forced_reopen(port, 0, 83, req); 197 zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
200} 198}
201 199
202static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req) 200static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
@@ -259,10 +257,9 @@ static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
259 257
260 if (ct->status) 258 if (ct->status)
261 return; 259 return;
262 if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) { 260 if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT)
263 atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
264 return; 261 return;
265 } 262
266 /* paranoia */ 263 /* paranoia */
267 if (ct_iu_req->wwpn != port->wwpn) 264 if (ct_iu_req->wwpn != port->wwpn)
268 return; 265 return;
@@ -375,16 +372,22 @@ static void zfcp_fc_adisc_handler(unsigned long data)
375 372
376 if (adisc->els.status) { 373 if (adisc->els.status) {
377 /* request rejected or timed out */ 374 /* request rejected or timed out */
378 zfcp_erp_port_forced_reopen(port, 0, 63, NULL); 375 zfcp_erp_port_forced_reopen(port, 0, "fcadh_1", NULL);
379 goto out; 376 goto out;
380 } 377 }
381 378
382 if (!port->wwnn) 379 if (!port->wwnn)
383 port->wwnn = ls_adisc->wwnn; 380 port->wwnn = ls_adisc->wwnn;
384 381
385 if (port->wwpn != ls_adisc->wwpn) 382 if ((port->wwpn != ls_adisc->wwpn) ||
386 zfcp_erp_port_reopen(port, 0, 64, NULL); 383 !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
384 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
385 "fcadh_2", NULL);
386 goto out;
387 }
387 388
389 /* port is good, unblock rport without going through erp */
390 zfcp_scsi_schedule_rport_register(port);
388 out: 391 out:
389 zfcp_port_put(port); 392 zfcp_port_put(port);
390 kfree(adisc); 393 kfree(adisc);
@@ -422,6 +425,31 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
422 return zfcp_fsf_send_els(&adisc->els); 425 return zfcp_fsf_send_els(&adisc->els);
423} 426}
424 427
428void zfcp_fc_link_test_work(struct work_struct *work)
429{
430 struct zfcp_port *port =
431 container_of(work, struct zfcp_port, test_link_work);
432 int retval;
433
434 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_UNBLOCKED)) {
435 zfcp_port_put(port);
436 return; /* port erp is running and will update rport status */
437 }
438
439 zfcp_port_get(port);
440 port->rport_task = RPORT_DEL;
441 zfcp_scsi_rport_work(&port->rport_work);
442
443 retval = zfcp_fc_adisc(port);
444 if (retval == 0)
445 return;
446
447 /* send of ADISC was not possible */
448 zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
449
450 zfcp_port_put(port);
451}
452
425/** 453/**
426 * zfcp_test_link - lightweight link test procedure 454 * zfcp_test_link - lightweight link test procedure
427 * @port: port to be tested 455 * @port: port to be tested
@@ -432,17 +460,9 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
432 */ 460 */
433void zfcp_test_link(struct zfcp_port *port) 461void zfcp_test_link(struct zfcp_port *port)
434{ 462{
435 int retval;
436
437 zfcp_port_get(port); 463 zfcp_port_get(port);
438 retval = zfcp_fc_adisc(port); 464 if (!queue_work(zfcp_data.work_queue, &port->test_link_work))
439 if (retval == 0) 465 zfcp_port_put(port);
440 return;
441
442 /* send of ADISC was not possible */
443 zfcp_port_put(port);
444 if (retval != -EBUSY)
445 zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
446} 466}
447 467
448static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num) 468static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num)
@@ -529,7 +549,7 @@ static void zfcp_validate_port(struct zfcp_port *port)
529 zfcp_port_put(port); 549 zfcp_port_put(port);
530 return; 550 return;
531 } 551 }
532 zfcp_erp_port_shutdown(port, 0, 151, NULL); 552 zfcp_erp_port_shutdown(port, 0, "fcpval1", NULL);
533 zfcp_erp_wait(adapter); 553 zfcp_erp_wait(adapter);
534 zfcp_port_put(port); 554 zfcp_port_put(port);
535 zfcp_port_dequeue(port); 555 zfcp_port_dequeue(port);
@@ -592,7 +612,7 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
592 if (IS_ERR(port)) 612 if (IS_ERR(port))
593 ret = PTR_ERR(port); 613 ret = PTR_ERR(port);
594 else 614 else
595 zfcp_erp_port_reopen(port, 0, 149, NULL); 615 zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL);
596 } 616 }
597 617
598 zfcp_erp_wait(adapter); 618 zfcp_erp_wait(adapter);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index e6416f8541b0..b29f3121b666 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Implementation of FSF commands. 4 * Implementation of FSF commands.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2008 6 * Copyright IBM Corporation 2002, 2009
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -12,11 +12,14 @@
12#include <linux/blktrace_api.h> 12#include <linux/blktrace_api.h>
13#include "zfcp_ext.h" 13#include "zfcp_ext.h"
14 14
15#define ZFCP_REQ_AUTO_CLEANUP 0x00000002
16#define ZFCP_REQ_NO_QTCB 0x00000008
17
15static void zfcp_fsf_request_timeout_handler(unsigned long data) 18static void zfcp_fsf_request_timeout_handler(unsigned long data)
16{ 19{
17 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; 20 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
18 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 62, 21 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
19 NULL); 22 "fsrth_1", NULL);
20} 23}
21 24
22static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, 25static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
@@ -75,7 +78,7 @@ static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
75 (unsigned long long)port->wwpn); 78 (unsigned long long)port->wwpn);
76 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); 79 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
77 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); 80 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
78 zfcp_erp_port_access_denied(port, 55, req); 81 zfcp_erp_port_access_denied(port, "fspad_1", req);
79 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 82 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
80} 83}
81 84
@@ -89,7 +92,7 @@ static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
89 (unsigned long long)unit->port->wwpn); 92 (unsigned long long)unit->port->wwpn);
90 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); 93 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
91 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); 94 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
92 zfcp_erp_unit_access_denied(unit, 59, req); 95 zfcp_erp_unit_access_denied(unit, "fsuad_1", req);
93 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 96 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
94} 97}
95 98
@@ -97,7 +100,7 @@ static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
97{ 100{
98 dev_err(&req->adapter->ccw_device->dev, "FCP device not " 101 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
99 "operational because of an unsupported FC class\n"); 102 "operational because of an unsupported FC class\n");
100 zfcp_erp_adapter_shutdown(req->adapter, 0, 123, req); 103 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req);
101 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 104 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
102} 105}
103 106
@@ -159,20 +162,13 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
159 list_for_each_entry(port, &adapter->port_list_head, list) 162 list_for_each_entry(port, &adapter->port_list_head, list)
160 if (port->d_id == d_id) { 163 if (port->d_id == d_id) {
161 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 164 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
162 switch (sr_buf->status_subtype) { 165 zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
163 case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT:
164 zfcp_erp_port_reopen(port, 0, 101, req);
165 break;
166 case FSF_STATUS_READ_SUB_ERROR_PORT:
167 zfcp_erp_port_shutdown(port, 0, 122, req);
168 break;
169 }
170 return; 166 return;
171 } 167 }
172 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 168 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
173} 169}
174 170
175static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id, 171static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
176 struct fsf_link_down_info *link_down) 172 struct fsf_link_down_info *link_down)
177{ 173{
178 struct zfcp_adapter *adapter = req->adapter; 174 struct zfcp_adapter *adapter = req->adapter;
@@ -181,6 +177,7 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id,
181 return; 177 return;
182 178
183 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 179 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
180 zfcp_scsi_schedule_rports_block(adapter);
184 181
185 if (!link_down) 182 if (!link_down)
186 goto out; 183 goto out;
@@ -261,13 +258,13 @@ static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
261 258
262 switch (sr_buf->status_subtype) { 259 switch (sr_buf->status_subtype) {
263 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: 260 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
264 zfcp_fsf_link_down_info_eval(req, 38, ldi); 261 zfcp_fsf_link_down_info_eval(req, "fssrld1", ldi);
265 break; 262 break;
266 case FSF_STATUS_READ_SUB_FDISC_FAILED: 263 case FSF_STATUS_READ_SUB_FDISC_FAILED:
267 zfcp_fsf_link_down_info_eval(req, 39, ldi); 264 zfcp_fsf_link_down_info_eval(req, "fssrld2", ldi);
268 break; 265 break;
269 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: 266 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
270 zfcp_fsf_link_down_info_eval(req, 40, NULL); 267 zfcp_fsf_link_down_info_eval(req, "fssrld3", NULL);
271 }; 268 };
272} 269}
273 270
@@ -307,22 +304,23 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
307 dev_info(&adapter->ccw_device->dev, 304 dev_info(&adapter->ccw_device->dev,
308 "The local link has been restored\n"); 305 "The local link has been restored\n");
309 /* All ports should be marked as ready to run again */ 306 /* All ports should be marked as ready to run again */
310 zfcp_erp_modify_adapter_status(adapter, 30, NULL, 307 zfcp_erp_modify_adapter_status(adapter, "fssrh_1", NULL,
311 ZFCP_STATUS_COMMON_RUNNING, 308 ZFCP_STATUS_COMMON_RUNNING,
312 ZFCP_SET); 309 ZFCP_SET);
313 zfcp_erp_adapter_reopen(adapter, 310 zfcp_erp_adapter_reopen(adapter,
314 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 311 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
315 ZFCP_STATUS_COMMON_ERP_FAILED, 312 ZFCP_STATUS_COMMON_ERP_FAILED,
316 102, req); 313 "fssrh_2", req);
317 break; 314 break;
318 case FSF_STATUS_READ_NOTIFICATION_LOST: 315 case FSF_STATUS_READ_NOTIFICATION_LOST:
319 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED) 316 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
320 zfcp_erp_adapter_access_changed(adapter, 135, req); 317 zfcp_erp_adapter_access_changed(adapter, "fssrh_3",
318 req);
321 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) 319 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
322 schedule_work(&adapter->scan_work); 320 schedule_work(&adapter->scan_work);
323 break; 321 break;
324 case FSF_STATUS_READ_CFDC_UPDATED: 322 case FSF_STATUS_READ_CFDC_UPDATED:
325 zfcp_erp_adapter_access_changed(adapter, 136, req); 323 zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req);
326 break; 324 break;
327 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: 325 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
328 adapter->adapter_features = sr_buf->payload.word[0]; 326 adapter->adapter_features = sr_buf->payload.word[0];
@@ -351,7 +349,7 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
351 dev_err(&req->adapter->ccw_device->dev, 349 dev_err(&req->adapter->ccw_device->dev,
352 "The FCP adapter reported a problem " 350 "The FCP adapter reported a problem "
353 "that cannot be recovered\n"); 351 "that cannot be recovered\n");
354 zfcp_erp_adapter_shutdown(req->adapter, 0, 121, req); 352 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
355 break; 353 break;
356 } 354 }
357 /* all non-return stats set FSFREQ_ERROR*/ 355 /* all non-return stats set FSFREQ_ERROR*/
@@ -368,7 +366,7 @@ static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
368 dev_err(&req->adapter->ccw_device->dev, 366 dev_err(&req->adapter->ccw_device->dev,
369 "The FCP adapter does not recognize the command 0x%x\n", 367 "The FCP adapter does not recognize the command 0x%x\n",
370 req->qtcb->header.fsf_command); 368 req->qtcb->header.fsf_command);
371 zfcp_erp_adapter_shutdown(req->adapter, 0, 120, req); 369 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req);
372 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 370 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
373 break; 371 break;
374 case FSF_ADAPTER_STATUS_AVAILABLE: 372 case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -400,17 +398,17 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
400 "QTCB version 0x%x not supported by FCP adapter " 398 "QTCB version 0x%x not supported by FCP adapter "
401 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION, 399 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
402 psq->word[0], psq->word[1]); 400 psq->word[0], psq->word[1]);
403 zfcp_erp_adapter_shutdown(adapter, 0, 117, req); 401 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req);
404 break; 402 break;
405 case FSF_PROT_ERROR_STATE: 403 case FSF_PROT_ERROR_STATE:
406 case FSF_PROT_SEQ_NUMB_ERROR: 404 case FSF_PROT_SEQ_NUMB_ERROR:
407 zfcp_erp_adapter_reopen(adapter, 0, 98, req); 405 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
408 req->status |= ZFCP_STATUS_FSFREQ_RETRY; 406 req->status |= ZFCP_STATUS_FSFREQ_RETRY;
409 break; 407 break;
410 case FSF_PROT_UNSUPP_QTCB_TYPE: 408 case FSF_PROT_UNSUPP_QTCB_TYPE:
411 dev_err(&adapter->ccw_device->dev, 409 dev_err(&adapter->ccw_device->dev,
412 "The QTCB type is not supported by the FCP adapter\n"); 410 "The QTCB type is not supported by the FCP adapter\n");
413 zfcp_erp_adapter_shutdown(adapter, 0, 118, req); 411 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req);
414 break; 412 break;
415 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 413 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
416 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 414 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
@@ -420,27 +418,29 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
420 dev_err(&adapter->ccw_device->dev, 418 dev_err(&adapter->ccw_device->dev,
421 "0x%Lx is an ambiguous request identifier\n", 419 "0x%Lx is an ambiguous request identifier\n",
422 (unsigned long long)qtcb->bottom.support.req_handle); 420 (unsigned long long)qtcb->bottom.support.req_handle);
423 zfcp_erp_adapter_shutdown(adapter, 0, 78, req); 421 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req);
424 break; 422 break;
425 case FSF_PROT_LINK_DOWN: 423 case FSF_PROT_LINK_DOWN:
426 zfcp_fsf_link_down_info_eval(req, 37, &psq->link_down_info); 424 zfcp_fsf_link_down_info_eval(req, "fspse_5",
425 &psq->link_down_info);
427 /* FIXME: reopening adapter now? better wait for link up */ 426 /* FIXME: reopening adapter now? better wait for link up */
428 zfcp_erp_adapter_reopen(adapter, 0, 79, req); 427 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
429 break; 428 break;
430 case FSF_PROT_REEST_QUEUE: 429 case FSF_PROT_REEST_QUEUE:
431 /* All ports should be marked as ready to run again */ 430 /* All ports should be marked as ready to run again */
432 zfcp_erp_modify_adapter_status(adapter, 28, NULL, 431 zfcp_erp_modify_adapter_status(adapter, "fspse_7", NULL,
433 ZFCP_STATUS_COMMON_RUNNING, 432 ZFCP_STATUS_COMMON_RUNNING,
434 ZFCP_SET); 433 ZFCP_SET);
435 zfcp_erp_adapter_reopen(adapter, 434 zfcp_erp_adapter_reopen(adapter,
436 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 435 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
437 ZFCP_STATUS_COMMON_ERP_FAILED, 99, req); 436 ZFCP_STATUS_COMMON_ERP_FAILED,
437 "fspse_8", req);
438 break; 438 break;
439 default: 439 default:
440 dev_err(&adapter->ccw_device->dev, 440 dev_err(&adapter->ccw_device->dev,
441 "0x%x is not a valid transfer protocol status\n", 441 "0x%x is not a valid transfer protocol status\n",
442 qtcb->prefix.prot_status); 442 qtcb->prefix.prot_status);
443 zfcp_erp_adapter_shutdown(adapter, 0, 119, req); 443 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
444 } 444 }
445 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 445 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
446} 446}
@@ -526,7 +526,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
526 dev_err(&adapter->ccw_device->dev, 526 dev_err(&adapter->ccw_device->dev,
527 "Unknown or unsupported arbitrated loop " 527 "Unknown or unsupported arbitrated loop "
528 "fibre channel topology detected\n"); 528 "fibre channel topology detected\n");
529 zfcp_erp_adapter_shutdown(adapter, 0, 127, req); 529 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req);
530 return -EIO; 530 return -EIO;
531 } 531 }
532 532
@@ -560,7 +560,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
560 "FCP adapter maximum QTCB size (%d bytes) " 560 "FCP adapter maximum QTCB size (%d bytes) "
561 "is too small\n", 561 "is too small\n",
562 bottom->max_qtcb_size); 562 bottom->max_qtcb_size);
563 zfcp_erp_adapter_shutdown(adapter, 0, 129, req); 563 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req);
564 return; 564 return;
565 } 565 }
566 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 566 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
@@ -577,11 +577,11 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
577 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 577 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
578 &adapter->status); 578 &adapter->status);
579 579
580 zfcp_fsf_link_down_info_eval(req, 42, 580 zfcp_fsf_link_down_info_eval(req, "fsecdh2",
581 &qtcb->header.fsf_status_qual.link_down_info); 581 &qtcb->header.fsf_status_qual.link_down_info);
582 break; 582 break;
583 default: 583 default:
584 zfcp_erp_adapter_shutdown(adapter, 0, 130, req); 584 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req);
585 return; 585 return;
586 } 586 }
587 587
@@ -597,14 +597,14 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
597 dev_err(&adapter->ccw_device->dev, 597 dev_err(&adapter->ccw_device->dev,
598 "The FCP adapter only supports newer " 598 "The FCP adapter only supports newer "
599 "control block versions\n"); 599 "control block versions\n");
600 zfcp_erp_adapter_shutdown(adapter, 0, 125, req); 600 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req);
601 return; 601 return;
602 } 602 }
603 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) { 603 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
604 dev_err(&adapter->ccw_device->dev, 604 dev_err(&adapter->ccw_device->dev,
605 "The FCP adapter only supports older " 605 "The FCP adapter only supports older "
606 "control block versions\n"); 606 "control block versions\n");
607 zfcp_erp_adapter_shutdown(adapter, 0, 126, req); 607 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req);
608 } 608 }
609} 609}
610 610
@@ -617,9 +617,10 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
617 if (req->data) 617 if (req->data)
618 memcpy(req->data, bottom, sizeof(*bottom)); 618 memcpy(req->data, bottom, sizeof(*bottom));
619 619
620 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) 620 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
621 fc_host_permanent_port_name(shost) = bottom->wwpn; 621 fc_host_permanent_port_name(shost) = bottom->wwpn;
622 else 622 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
623 } else
623 fc_host_permanent_port_name(shost) = fc_host_port_name(shost); 624 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
624 fc_host_maxframe_size(shost) = bottom->maximum_frame_size; 625 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
625 fc_host_supported_speeds(shost) = bottom->supported_speed; 626 fc_host_supported_speeds(shost) = bottom->supported_speed;
@@ -638,20 +639,12 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
638 break; 639 break;
639 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 640 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
640 zfcp_fsf_exchange_port_evaluate(req); 641 zfcp_fsf_exchange_port_evaluate(req);
641 zfcp_fsf_link_down_info_eval(req, 43, 642 zfcp_fsf_link_down_info_eval(req, "fsepdh1",
642 &qtcb->header.fsf_status_qual.link_down_info); 643 &qtcb->header.fsf_status_qual.link_down_info);
643 break; 644 break;
644 } 645 }
645} 646}
646 647
647static int zfcp_fsf_sbal_available(struct zfcp_adapter *adapter)
648{
649 if (atomic_read(&adapter->req_q.count) > 0)
650 return 1;
651 atomic_inc(&adapter->qdio_outb_full);
652 return 0;
653}
654
655static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter) 648static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
656 __releases(&adapter->req_q_lock) 649 __releases(&adapter->req_q_lock)
657 __acquires(&adapter->req_q_lock) 650 __acquires(&adapter->req_q_lock)
@@ -735,7 +728,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
735 728
736 req->adapter = adapter; 729 req->adapter = adapter;
737 req->fsf_command = fsf_cmd; 730 req->fsf_command = fsf_cmd;
738 req->req_id = adapter->req_no++; 731 req->req_id = adapter->req_no;
739 req->sbal_number = 1; 732 req->sbal_number = 1;
740 req->sbal_first = req_q->first; 733 req->sbal_first = req_q->first;
741 req->sbal_last = req_q->first; 734 req->sbal_last = req_q->first;
@@ -791,13 +784,14 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
791 if (zfcp_reqlist_find_safe(adapter, req)) 784 if (zfcp_reqlist_find_safe(adapter, req))
792 zfcp_reqlist_remove(adapter, req); 785 zfcp_reqlist_remove(adapter, req);
793 spin_unlock_irqrestore(&adapter->req_list_lock, flags); 786 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
794 zfcp_erp_adapter_reopen(adapter, 0, 116, req); 787 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
795 return -EIO; 788 return -EIO;
796 } 789 }
797 790
798 /* Don't increase for unsolicited status */ 791 /* Don't increase for unsolicited status */
799 if (req->qtcb) 792 if (req->qtcb)
800 adapter->fsf_req_seq_no++; 793 adapter->fsf_req_seq_no++;
794 adapter->req_no++;
801 795
802 return 0; 796 return 0;
803} 797}
@@ -870,14 +864,14 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
870 switch (req->qtcb->header.fsf_status) { 864 switch (req->qtcb->header.fsf_status) {
871 case FSF_PORT_HANDLE_NOT_VALID: 865 case FSF_PORT_HANDLE_NOT_VALID:
872 if (fsq->word[0] == fsq->word[1]) { 866 if (fsq->word[0] == fsq->word[1]) {
873 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 104, 867 zfcp_erp_adapter_reopen(unit->port->adapter, 0,
874 req); 868 "fsafch1", req);
875 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 869 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
876 } 870 }
877 break; 871 break;
878 case FSF_LUN_HANDLE_NOT_VALID: 872 case FSF_LUN_HANDLE_NOT_VALID:
879 if (fsq->word[0] == fsq->word[1]) { 873 if (fsq->word[0] == fsq->word[1]) {
880 zfcp_erp_port_reopen(unit->port, 0, 105, req); 874 zfcp_erp_port_reopen(unit->port, 0, "fsafch2", req);
881 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 875 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
882 } 876 }
883 break; 877 break;
@@ -885,12 +879,12 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
885 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; 879 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
886 break; 880 break;
887 case FSF_PORT_BOXED: 881 case FSF_PORT_BOXED:
888 zfcp_erp_port_boxed(unit->port, 47, req); 882 zfcp_erp_port_boxed(unit->port, "fsafch3", req);
889 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 883 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
890 ZFCP_STATUS_FSFREQ_RETRY; 884 ZFCP_STATUS_FSFREQ_RETRY;
891 break; 885 break;
892 case FSF_LUN_BOXED: 886 case FSF_LUN_BOXED:
893 zfcp_erp_unit_boxed(unit, 48, req); 887 zfcp_erp_unit_boxed(unit, "fsafch4", req);
894 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 888 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
895 ZFCP_STATUS_FSFREQ_RETRY; 889 ZFCP_STATUS_FSFREQ_RETRY;
896 break; 890 break;
@@ -912,27 +906,22 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
912/** 906/**
913 * zfcp_fsf_abort_fcp_command - abort running SCSI command 907 * zfcp_fsf_abort_fcp_command - abort running SCSI command
914 * @old_req_id: unsigned long 908 * @old_req_id: unsigned long
915 * @adapter: pointer to struct zfcp_adapter
916 * @unit: pointer to struct zfcp_unit 909 * @unit: pointer to struct zfcp_unit
917 * @req_flags: integer specifying the request flags
918 * Returns: pointer to struct zfcp_fsf_req 910 * Returns: pointer to struct zfcp_fsf_req
919 *
920 * FIXME(design): should be watched by a timeout !!!
921 */ 911 */
922 912
923struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, 913struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
924 struct zfcp_adapter *adapter, 914 struct zfcp_unit *unit)
925 struct zfcp_unit *unit,
926 int req_flags)
927{ 915{
928 struct qdio_buffer_element *sbale; 916 struct qdio_buffer_element *sbale;
929 struct zfcp_fsf_req *req = NULL; 917 struct zfcp_fsf_req *req = NULL;
918 struct zfcp_adapter *adapter = unit->port->adapter;
930 919
931 spin_lock(&adapter->req_q_lock); 920 spin_lock_bh(&adapter->req_q_lock);
932 if (!zfcp_fsf_sbal_available(adapter)) 921 if (zfcp_fsf_req_sbal_get(adapter))
933 goto out; 922 goto out;
934 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, 923 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
935 req_flags, adapter->pool.fsf_req_abort); 924 0, adapter->pool.fsf_req_abort);
936 if (IS_ERR(req)) { 925 if (IS_ERR(req)) {
937 req = NULL; 926 req = NULL;
938 goto out; 927 goto out;
@@ -960,7 +949,7 @@ out_error_free:
960 zfcp_fsf_req_free(req); 949 zfcp_fsf_req_free(req);
961 req = NULL; 950 req = NULL;
962out: 951out:
963 spin_unlock(&adapter->req_q_lock); 952 spin_unlock_bh(&adapter->req_q_lock);
964 return req; 953 return req;
965} 954}
966 955
@@ -998,7 +987,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
998 ZFCP_STATUS_FSFREQ_RETRY; 987 ZFCP_STATUS_FSFREQ_RETRY;
999 break; 988 break;
1000 case FSF_PORT_HANDLE_NOT_VALID: 989 case FSF_PORT_HANDLE_NOT_VALID:
1001 zfcp_erp_adapter_reopen(adapter, 0, 106, req); 990 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
1002 case FSF_GENERIC_COMMAND_REJECTED: 991 case FSF_GENERIC_COMMAND_REJECTED:
1003 case FSF_PAYLOAD_SIZE_MISMATCH: 992 case FSF_PAYLOAD_SIZE_MISMATCH:
1004 case FSF_REQUEST_SIZE_TOO_LARGE: 993 case FSF_REQUEST_SIZE_TOO_LARGE:
@@ -1174,12 +1163,8 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
1174 struct fsf_qtcb_bottom_support *bottom; 1163 struct fsf_qtcb_bottom_support *bottom;
1175 int ret = -EIO; 1164 int ret = -EIO;
1176 1165
1177 if (unlikely(!(atomic_read(&els->port->status) & 1166 spin_lock_bh(&adapter->req_q_lock);
1178 ZFCP_STATUS_COMMON_UNBLOCKED))) 1167 if (zfcp_fsf_req_sbal_get(adapter))
1179 return -EBUSY;
1180
1181 spin_lock(&adapter->req_q_lock);
1182 if (!zfcp_fsf_sbal_available(adapter))
1183 goto out; 1168 goto out;
1184 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, 1169 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
1185 ZFCP_REQ_AUTO_CLEANUP, NULL); 1170 ZFCP_REQ_AUTO_CLEANUP, NULL);
@@ -1212,7 +1197,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
1212failed_send: 1197failed_send:
1213 zfcp_fsf_req_free(req); 1198 zfcp_fsf_req_free(req);
1214out: 1199out:
1215 spin_unlock(&adapter->req_q_lock); 1200 spin_unlock_bh(&adapter->req_q_lock);
1216 return ret; 1201 return ret;
1217} 1202}
1218 1203
@@ -1224,7 +1209,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1224 int retval = -EIO; 1209 int retval = -EIO;
1225 1210
1226 spin_lock_bh(&adapter->req_q_lock); 1211 spin_lock_bh(&adapter->req_q_lock);
1227 if (!zfcp_fsf_sbal_available(adapter)) 1212 if (zfcp_fsf_req_sbal_get(adapter))
1228 goto out; 1213 goto out;
1229 req = zfcp_fsf_req_create(adapter, 1214 req = zfcp_fsf_req_create(adapter,
1230 FSF_QTCB_EXCHANGE_CONFIG_DATA, 1215 FSF_QTCB_EXCHANGE_CONFIG_DATA,
@@ -1320,7 +1305,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1320 return -EOPNOTSUPP; 1305 return -EOPNOTSUPP;
1321 1306
1322 spin_lock_bh(&adapter->req_q_lock); 1307 spin_lock_bh(&adapter->req_q_lock);
1323 if (!zfcp_fsf_sbal_available(adapter)) 1308 if (zfcp_fsf_req_sbal_get(adapter))
1324 goto out; 1309 goto out;
1325 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 1310 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
1326 ZFCP_REQ_AUTO_CLEANUP, 1311 ZFCP_REQ_AUTO_CLEANUP,
@@ -1366,7 +1351,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
1366 return -EOPNOTSUPP; 1351 return -EOPNOTSUPP;
1367 1352
1368 spin_lock_bh(&adapter->req_q_lock); 1353 spin_lock_bh(&adapter->req_q_lock);
1369 if (!zfcp_fsf_sbal_available(adapter)) 1354 if (zfcp_fsf_req_sbal_get(adapter))
1370 goto out; 1355 goto out;
1371 1356
1372 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0, 1357 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0,
@@ -1416,7 +1401,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1416 "Not enough FCP adapter resources to open " 1401 "Not enough FCP adapter resources to open "
1417 "remote port 0x%016Lx\n", 1402 "remote port 0x%016Lx\n",
1418 (unsigned long long)port->wwpn); 1403 (unsigned long long)port->wwpn);
1419 zfcp_erp_port_failed(port, 31, req); 1404 zfcp_erp_port_failed(port, "fsoph_1", req);
1420 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1405 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1421 break; 1406 break;
1422 case FSF_ADAPTER_STATUS_AVAILABLE: 1407 case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1522,13 +1507,13 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1522 1507
1523 switch (req->qtcb->header.fsf_status) { 1508 switch (req->qtcb->header.fsf_status) {
1524 case FSF_PORT_HANDLE_NOT_VALID: 1509 case FSF_PORT_HANDLE_NOT_VALID:
1525 zfcp_erp_adapter_reopen(port->adapter, 0, 107, req); 1510 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req);
1526 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1511 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1527 break; 1512 break;
1528 case FSF_ADAPTER_STATUS_AVAILABLE: 1513 case FSF_ADAPTER_STATUS_AVAILABLE:
1529 break; 1514 break;
1530 case FSF_GOOD: 1515 case FSF_GOOD:
1531 zfcp_erp_modify_port_status(port, 33, req, 1516 zfcp_erp_modify_port_status(port, "fscph_2", req,
1532 ZFCP_STATUS_COMMON_OPEN, 1517 ZFCP_STATUS_COMMON_OPEN,
1533 ZFCP_CLEAR); 1518 ZFCP_CLEAR);
1534 break; 1519 break;
@@ -1657,7 +1642,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1657 1642
1658 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) { 1643 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1659 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1644 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1660 zfcp_erp_adapter_reopen(wka_port->adapter, 0, 84, req); 1645 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
1661 } 1646 }
1662 1647
1663 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1648 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
@@ -1712,18 +1697,18 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1712 struct zfcp_unit *unit; 1697 struct zfcp_unit *unit;
1713 1698
1714 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1699 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1715 goto skip_fsfstatus; 1700 return;
1716 1701
1717 switch (header->fsf_status) { 1702 switch (header->fsf_status) {
1718 case FSF_PORT_HANDLE_NOT_VALID: 1703 case FSF_PORT_HANDLE_NOT_VALID:
1719 zfcp_erp_adapter_reopen(port->adapter, 0, 108, req); 1704 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req);
1720 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1705 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1721 break; 1706 break;
1722 case FSF_ACCESS_DENIED: 1707 case FSF_ACCESS_DENIED:
1723 zfcp_fsf_access_denied_port(req, port); 1708 zfcp_fsf_access_denied_port(req, port);
1724 break; 1709 break;
1725 case FSF_PORT_BOXED: 1710 case FSF_PORT_BOXED:
1726 zfcp_erp_port_boxed(port, 50, req); 1711 zfcp_erp_port_boxed(port, "fscpph2", req);
1727 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 1712 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1728 ZFCP_STATUS_FSFREQ_RETRY; 1713 ZFCP_STATUS_FSFREQ_RETRY;
1729 /* can't use generic zfcp_erp_modify_port_status because 1714 /* can't use generic zfcp_erp_modify_port_status because
@@ -1752,8 +1737,6 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1752 &unit->status); 1737 &unit->status);
1753 break; 1738 break;
1754 } 1739 }
1755skip_fsfstatus:
1756 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, &port->status);
1757} 1740}
1758 1741
1759/** 1742/**
@@ -1789,8 +1772,6 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1789 req->erp_action = erp_action; 1772 req->erp_action = erp_action;
1790 req->handler = zfcp_fsf_close_physical_port_handler; 1773 req->handler = zfcp_fsf_close_physical_port_handler;
1791 erp_action->fsf_req = req; 1774 erp_action->fsf_req = req;
1792 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
1793 &erp_action->port->status);
1794 1775
1795 zfcp_fsf_start_erp_timer(req); 1776 zfcp_fsf_start_erp_timer(req);
1796 retval = zfcp_fsf_req_send(req); 1777 retval = zfcp_fsf_req_send(req);
@@ -1825,7 +1806,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1825 switch (header->fsf_status) { 1806 switch (header->fsf_status) {
1826 1807
1827 case FSF_PORT_HANDLE_NOT_VALID: 1808 case FSF_PORT_HANDLE_NOT_VALID:
1828 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 109, req); 1809 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fsouh_1", req);
1829 /* fall through */ 1810 /* fall through */
1830 case FSF_LUN_ALREADY_OPEN: 1811 case FSF_LUN_ALREADY_OPEN:
1831 break; 1812 break;
@@ -1835,7 +1816,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1835 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); 1816 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1836 break; 1817 break;
1837 case FSF_PORT_BOXED: 1818 case FSF_PORT_BOXED:
1838 zfcp_erp_port_boxed(unit->port, 51, req); 1819 zfcp_erp_port_boxed(unit->port, "fsouh_2", req);
1839 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 1820 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1840 ZFCP_STATUS_FSFREQ_RETRY; 1821 ZFCP_STATUS_FSFREQ_RETRY;
1841 break; 1822 break;
@@ -1851,7 +1832,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1851 else 1832 else
1852 zfcp_act_eval_err(adapter, 1833 zfcp_act_eval_err(adapter,
1853 header->fsf_status_qual.word[2]); 1834 header->fsf_status_qual.word[2]);
1854 zfcp_erp_unit_access_denied(unit, 60, req); 1835 zfcp_erp_unit_access_denied(unit, "fsouh_3", req);
1855 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); 1836 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
1856 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); 1837 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1857 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1838 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1862,7 +1843,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1862 "0x%016Lx on port 0x%016Lx\n", 1843 "0x%016Lx on port 0x%016Lx\n",
1863 (unsigned long long)unit->fcp_lun, 1844 (unsigned long long)unit->fcp_lun,
1864 (unsigned long long)unit->port->wwpn); 1845 (unsigned long long)unit->port->wwpn);
1865 zfcp_erp_unit_failed(unit, 34, req); 1846 zfcp_erp_unit_failed(unit, "fsouh_4", req);
1866 /* fall through */ 1847 /* fall through */
1867 case FSF_INVALID_COMMAND_OPTION: 1848 case FSF_INVALID_COMMAND_OPTION:
1868 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1849 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1911,9 +1892,9 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1911 "port 0x%016Lx)\n", 1892 "port 0x%016Lx)\n",
1912 (unsigned long long)unit->fcp_lun, 1893 (unsigned long long)unit->fcp_lun,
1913 (unsigned long long)unit->port->wwpn); 1894 (unsigned long long)unit->port->wwpn);
1914 zfcp_erp_unit_failed(unit, 35, req); 1895 zfcp_erp_unit_failed(unit, "fsouh_5", req);
1915 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1896 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1916 zfcp_erp_unit_shutdown(unit, 0, 80, req); 1897 zfcp_erp_unit_shutdown(unit, 0, "fsouh_6", req);
1917 } else if (!exclusive && readwrite) { 1898 } else if (!exclusive && readwrite) {
1918 dev_err(&adapter->ccw_device->dev, 1899 dev_err(&adapter->ccw_device->dev,
1919 "Shared read-write access not " 1900 "Shared read-write access not "
@@ -1921,9 +1902,9 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1921 "0x%016Lx)\n", 1902 "0x%016Lx)\n",
1922 (unsigned long long)unit->fcp_lun, 1903 (unsigned long long)unit->fcp_lun,
1923 (unsigned long long)unit->port->wwpn); 1904 (unsigned long long)unit->port->wwpn);
1924 zfcp_erp_unit_failed(unit, 36, req); 1905 zfcp_erp_unit_failed(unit, "fsouh_7", req);
1925 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1906 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1926 zfcp_erp_unit_shutdown(unit, 0, 81, req); 1907 zfcp_erp_unit_shutdown(unit, 0, "fsouh_8", req);
1927 } 1908 }
1928 } 1909 }
1929 break; 1910 break;
@@ -1988,15 +1969,15 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
1988 1969
1989 switch (req->qtcb->header.fsf_status) { 1970 switch (req->qtcb->header.fsf_status) {
1990 case FSF_PORT_HANDLE_NOT_VALID: 1971 case FSF_PORT_HANDLE_NOT_VALID:
1991 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 110, req); 1972 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fscuh_1", req);
1992 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1973 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1993 break; 1974 break;
1994 case FSF_LUN_HANDLE_NOT_VALID: 1975 case FSF_LUN_HANDLE_NOT_VALID:
1995 zfcp_erp_port_reopen(unit->port, 0, 111, req); 1976 zfcp_erp_port_reopen(unit->port, 0, "fscuh_2", req);
1996 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1977 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1997 break; 1978 break;
1998 case FSF_PORT_BOXED: 1979 case FSF_PORT_BOXED:
1999 zfcp_erp_port_boxed(unit->port, 52, req); 1980 zfcp_erp_port_boxed(unit->port, "fscuh_3", req);
2000 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 1981 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2001 ZFCP_STATUS_FSFREQ_RETRY; 1982 ZFCP_STATUS_FSFREQ_RETRY;
2002 break; 1983 break;
@@ -2073,7 +2054,6 @@ static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
2073 struct fsf_qual_latency_info *lat_inf; 2054 struct fsf_qual_latency_info *lat_inf;
2074 struct latency_cont *lat; 2055 struct latency_cont *lat;
2075 struct zfcp_unit *unit = req->unit; 2056 struct zfcp_unit *unit = req->unit;
2076 unsigned long flags;
2077 2057
2078 lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info; 2058 lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info;
2079 2059
@@ -2091,11 +2071,11 @@ static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
2091 return; 2071 return;
2092 } 2072 }
2093 2073
2094 spin_lock_irqsave(&unit->latencies.lock, flags); 2074 spin_lock(&unit->latencies.lock);
2095 zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat); 2075 zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat);
2096 zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat); 2076 zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat);
2097 lat->counter++; 2077 lat->counter++;
2098 spin_unlock_irqrestore(&unit->latencies.lock, flags); 2078 spin_unlock(&unit->latencies.lock);
2099} 2079}
2100 2080
2101#ifdef CONFIG_BLK_DEV_IO_TRACE 2081#ifdef CONFIG_BLK_DEV_IO_TRACE
@@ -2147,7 +2127,6 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2147 2127
2148 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) { 2128 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
2149 set_host_byte(scpnt, DID_SOFT_ERROR); 2129 set_host_byte(scpnt, DID_SOFT_ERROR);
2150 set_driver_byte(scpnt, SUGGEST_RETRY);
2151 goto skip_fsfstatus; 2130 goto skip_fsfstatus;
2152 } 2131 }
2153 2132
@@ -2237,12 +2216,12 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2237 switch (header->fsf_status) { 2216 switch (header->fsf_status) {
2238 case FSF_HANDLE_MISMATCH: 2217 case FSF_HANDLE_MISMATCH:
2239 case FSF_PORT_HANDLE_NOT_VALID: 2218 case FSF_PORT_HANDLE_NOT_VALID:
2240 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 112, req); 2219 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fssfch1", req);
2241 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2220 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2242 break; 2221 break;
2243 case FSF_FCPLUN_NOT_VALID: 2222 case FSF_FCPLUN_NOT_VALID:
2244 case FSF_LUN_HANDLE_NOT_VALID: 2223 case FSF_LUN_HANDLE_NOT_VALID:
2245 zfcp_erp_port_reopen(unit->port, 0, 113, req); 2224 zfcp_erp_port_reopen(unit->port, 0, "fssfch2", req);
2246 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2225 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2247 break; 2226 break;
2248 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 2227 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -2258,7 +2237,8 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2258 req->qtcb->bottom.io.data_direction, 2237 req->qtcb->bottom.io.data_direction,
2259 (unsigned long long)unit->fcp_lun, 2238 (unsigned long long)unit->fcp_lun,
2260 (unsigned long long)unit->port->wwpn); 2239 (unsigned long long)unit->port->wwpn);
2261 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, req); 2240 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch3",
2241 req);
2262 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2242 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2263 break; 2243 break;
2264 case FSF_CMND_LENGTH_NOT_VALID: 2244 case FSF_CMND_LENGTH_NOT_VALID:
@@ -2268,16 +2248,17 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2268 req->qtcb->bottom.io.fcp_cmnd_length, 2248 req->qtcb->bottom.io.fcp_cmnd_length,
2269 (unsigned long long)unit->fcp_lun, 2249 (unsigned long long)unit->fcp_lun,
2270 (unsigned long long)unit->port->wwpn); 2250 (unsigned long long)unit->port->wwpn);
2271 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, req); 2251 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch4",
2252 req);
2272 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2253 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2273 break; 2254 break;
2274 case FSF_PORT_BOXED: 2255 case FSF_PORT_BOXED:
2275 zfcp_erp_port_boxed(unit->port, 53, req); 2256 zfcp_erp_port_boxed(unit->port, "fssfch5", req);
2276 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 2257 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2277 ZFCP_STATUS_FSFREQ_RETRY; 2258 ZFCP_STATUS_FSFREQ_RETRY;
2278 break; 2259 break;
2279 case FSF_LUN_BOXED: 2260 case FSF_LUN_BOXED:
2280 zfcp_erp_unit_boxed(unit, 54, req); 2261 zfcp_erp_unit_boxed(unit, "fssfch6", req);
2281 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 2262 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2282 ZFCP_STATUS_FSFREQ_RETRY; 2263 ZFCP_STATUS_FSFREQ_RETRY;
2283 break; 2264 break;
@@ -2314,30 +2295,29 @@ static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
2314 2295
2315/** 2296/**
2316 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) 2297 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
2317 * @adapter: adapter where scsi command is issued
2318 * @unit: unit where command is sent to 2298 * @unit: unit where command is sent to
2319 * @scsi_cmnd: scsi command to be sent 2299 * @scsi_cmnd: scsi command to be sent
2320 * @timer: timer to be started when request is initiated
2321 * @req_flags: flags for fsf_request
2322 */ 2300 */
2323int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter, 2301int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2324 struct zfcp_unit *unit, 2302 struct scsi_cmnd *scsi_cmnd)
2325 struct scsi_cmnd *scsi_cmnd,
2326 int use_timer, int req_flags)
2327{ 2303{
2328 struct zfcp_fsf_req *req; 2304 struct zfcp_fsf_req *req;
2329 struct fcp_cmnd_iu *fcp_cmnd_iu; 2305 struct fcp_cmnd_iu *fcp_cmnd_iu;
2330 unsigned int sbtype; 2306 unsigned int sbtype;
2331 int real_bytes, retval = -EIO; 2307 int real_bytes, retval = -EIO;
2308 struct zfcp_adapter *adapter = unit->port->adapter;
2332 2309
2333 if (unlikely(!(atomic_read(&unit->status) & 2310 if (unlikely(!(atomic_read(&unit->status) &
2334 ZFCP_STATUS_COMMON_UNBLOCKED))) 2311 ZFCP_STATUS_COMMON_UNBLOCKED)))
2335 return -EBUSY; 2312 return -EBUSY;
2336 2313
2337 spin_lock(&adapter->req_q_lock); 2314 spin_lock(&adapter->req_q_lock);
2338 if (!zfcp_fsf_sbal_available(adapter)) 2315 if (atomic_read(&adapter->req_q.count) <= 0) {
2316 atomic_inc(&adapter->qdio_outb_full);
2339 goto out; 2317 goto out;
2340 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, 2318 }
2319 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND,
2320 ZFCP_REQ_AUTO_CLEANUP,
2341 adapter->pool.fsf_req_scsi); 2321 adapter->pool.fsf_req_scsi);
2342 if (IS_ERR(req)) { 2322 if (IS_ERR(req)) {
2343 retval = PTR_ERR(req); 2323 retval = PTR_ERR(req);
@@ -2411,7 +2391,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
2411 "on port 0x%016Lx closed\n", 2391 "on port 0x%016Lx closed\n",
2412 (unsigned long long)unit->fcp_lun, 2392 (unsigned long long)unit->fcp_lun,
2413 (unsigned long long)unit->port->wwpn); 2393 (unsigned long long)unit->port->wwpn);
2414 zfcp_erp_unit_shutdown(unit, 0, 131, req); 2394 zfcp_erp_unit_shutdown(unit, 0, "fssfct1", req);
2415 retval = -EINVAL; 2395 retval = -EINVAL;
2416 } 2396 }
2417 goto failed_scsi_cmnd; 2397 goto failed_scsi_cmnd;
@@ -2419,9 +2399,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
2419 2399
2420 zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes); 2400 zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
2421 2401
2422 if (use_timer)
2423 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2424
2425 retval = zfcp_fsf_req_send(req); 2402 retval = zfcp_fsf_req_send(req);
2426 if (unlikely(retval)) 2403 if (unlikely(retval))
2427 goto failed_scsi_cmnd; 2404 goto failed_scsi_cmnd;
@@ -2439,28 +2416,25 @@ out:
2439 2416
2440/** 2417/**
2441 * zfcp_fsf_send_fcp_ctm - send SCSI task management command 2418 * zfcp_fsf_send_fcp_ctm - send SCSI task management command
2442 * @adapter: pointer to struct zfcp-adapter
2443 * @unit: pointer to struct zfcp_unit 2419 * @unit: pointer to struct zfcp_unit
2444 * @tm_flags: unsigned byte for task management flags 2420 * @tm_flags: unsigned byte for task management flags
2445 * @req_flags: int request flags
2446 * Returns: on success pointer to struct fsf_req, NULL otherwise 2421 * Returns: on success pointer to struct fsf_req, NULL otherwise
2447 */ 2422 */
2448struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter, 2423struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2449 struct zfcp_unit *unit,
2450 u8 tm_flags, int req_flags)
2451{ 2424{
2452 struct qdio_buffer_element *sbale; 2425 struct qdio_buffer_element *sbale;
2453 struct zfcp_fsf_req *req = NULL; 2426 struct zfcp_fsf_req *req = NULL;
2454 struct fcp_cmnd_iu *fcp_cmnd_iu; 2427 struct fcp_cmnd_iu *fcp_cmnd_iu;
2428 struct zfcp_adapter *adapter = unit->port->adapter;
2455 2429
2456 if (unlikely(!(atomic_read(&unit->status) & 2430 if (unlikely(!(atomic_read(&unit->status) &
2457 ZFCP_STATUS_COMMON_UNBLOCKED))) 2431 ZFCP_STATUS_COMMON_UNBLOCKED)))
2458 return NULL; 2432 return NULL;
2459 2433
2460 spin_lock(&adapter->req_q_lock); 2434 spin_lock_bh(&adapter->req_q_lock);
2461 if (!zfcp_fsf_sbal_available(adapter)) 2435 if (zfcp_fsf_req_sbal_get(adapter))
2462 goto out; 2436 goto out;
2463 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, 2437 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, 0,
2464 adapter->pool.fsf_req_scsi); 2438 adapter->pool.fsf_req_scsi);
2465 if (IS_ERR(req)) { 2439 if (IS_ERR(req)) {
2466 req = NULL; 2440 req = NULL;
@@ -2492,7 +2466,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2492 zfcp_fsf_req_free(req); 2466 zfcp_fsf_req_free(req);
2493 req = NULL; 2467 req = NULL;
2494out: 2468out:
2495 spin_unlock(&adapter->req_q_lock); 2469 spin_unlock_bh(&adapter->req_q_lock);
2496 return req; 2470 return req;
2497} 2471}
2498 2472
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 8bb200252347..df7f232faba8 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -127,10 +127,6 @@
127#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A 127#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A
128#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C 128#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C
129 129
130/* status subtypes in status read buffer */
131#define FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT 0x00000001
132#define FSF_STATUS_READ_SUB_ERROR_PORT 0x00000002
133
134/* status subtypes for link down */ 130/* status subtypes for link down */
135#define FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK 0x00000000 131#define FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK 0x00000000
136#define FSF_STATUS_READ_SUB_FDISC_FAILED 0x00000001 132#define FSF_STATUS_READ_SUB_FDISC_FAILED 0x00000001
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 33e0a206a0a4..e0a215309df0 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -11,9 +11,6 @@
11 11
12#include "zfcp_ext.h" 12#include "zfcp_ext.h"
13 13
14/* FIXME(tune): free space should be one max. SBAL chain plus what? */
15#define ZFCP_QDIO_PCI_INTERVAL (QDIO_MAX_BUFFERS_PER_Q \
16 - (FSF_MAX_SBALS_PER_REQ + 4))
17#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) 14#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
18 15
19static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal) 16static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
@@ -58,7 +55,7 @@ void zfcp_qdio_free(struct zfcp_adapter *adapter)
58 } 55 }
59} 56}
60 57
61static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id) 58static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, char *id)
62{ 59{
63 dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n"); 60 dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
64 61
@@ -77,6 +74,23 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
77 } 74 }
78} 75}
79 76
77/* this needs to be called prior to updating the queue fill level */
78static void zfcp_qdio_account(struct zfcp_adapter *adapter)
79{
80 ktime_t now;
81 s64 span;
82 int free, used;
83
84 spin_lock(&adapter->qdio_stat_lock);
85 now = ktime_get();
86 span = ktime_us_delta(now, adapter->req_q_time);
87 free = max(0, atomic_read(&adapter->req_q.count));
88 used = QDIO_MAX_BUFFERS_PER_Q - free;
89 adapter->req_q_util += used * span;
90 adapter->req_q_time = now;
91 spin_unlock(&adapter->qdio_stat_lock);
92}
93
80static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, 94static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
81 int queue_no, int first, int count, 95 int queue_no, int first, int count,
82 unsigned long parm) 96 unsigned long parm)
@@ -86,13 +100,14 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
86 100
87 if (unlikely(qdio_err)) { 101 if (unlikely(qdio_err)) {
88 zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); 102 zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
89 zfcp_qdio_handler_error(adapter, 140); 103 zfcp_qdio_handler_error(adapter, "qdireq1");
90 return; 104 return;
91 } 105 }
92 106
93 /* cleanup all SBALs being program-owned now */ 107 /* cleanup all SBALs being program-owned now */
94 zfcp_qdio_zero_sbals(queue->sbal, first, count); 108 zfcp_qdio_zero_sbals(queue->sbal, first, count);
95 109
110 zfcp_qdio_account(adapter);
96 atomic_add(count, &queue->count); 111 atomic_add(count, &queue->count);
97 wake_up(&adapter->request_wq); 112 wake_up(&adapter->request_wq);
98} 113}
@@ -154,7 +169,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
154 169
155 if (unlikely(qdio_err)) { 170 if (unlikely(qdio_err)) {
156 zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); 171 zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
157 zfcp_qdio_handler_error(adapter, 147); 172 zfcp_qdio_handler_error(adapter, "qdires1");
158 return; 173 return;
159 } 174 }
160 175
@@ -346,21 +361,12 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
346 struct zfcp_qdio_queue *req_q = &adapter->req_q; 361 struct zfcp_qdio_queue *req_q = &adapter->req_q;
347 int first = fsf_req->sbal_first; 362 int first = fsf_req->sbal_first;
348 int count = fsf_req->sbal_number; 363 int count = fsf_req->sbal_number;
349 int retval, pci, pci_batch; 364 int retval;
350 struct qdio_buffer_element *sbale; 365 unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
351 366
352 /* acknowledgements for transferred buffers */ 367 zfcp_qdio_account(adapter);
353 pci_batch = adapter->req_q_pci_batch + count;
354 if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) {
355 pci_batch %= ZFCP_QDIO_PCI_INTERVAL;
356 pci = first + count - (pci_batch + 1);
357 pci %= QDIO_MAX_BUFFERS_PER_Q;
358 sbale = zfcp_qdio_sbale(req_q, pci, 0);
359 sbale->flags |= SBAL_FLAGS0_PCI;
360 }
361 368
362 retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first, 369 retval = do_QDIO(adapter->ccw_device, qdio_flags, 0, first, count);
363 count);
364 if (unlikely(retval)) { 370 if (unlikely(retval)) {
365 zfcp_qdio_zero_sbals(req_q->sbal, first, count); 371 zfcp_qdio_zero_sbals(req_q->sbal, first, count);
366 return retval; 372 return retval;
@@ -370,7 +376,6 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
370 atomic_sub(count, &req_q->count); 376 atomic_sub(count, &req_q->count);
371 req_q->first += count; 377 req_q->first += count;
372 req_q->first %= QDIO_MAX_BUFFERS_PER_Q; 378 req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
373 adapter->req_q_pci_batch = pci_batch;
374 return 0; 379 return 0;
375} 380}
376 381
@@ -441,7 +446,6 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
441 } 446 }
442 req_q->first = 0; 447 req_q->first = 0;
443 atomic_set(&req_q->count, 0); 448 atomic_set(&req_q->count, 0);
444 adapter->req_q_pci_batch = 0;
445 adapter->resp_q.first = 0; 449 adapter->resp_q.first = 0;
446 atomic_set(&adapter->resp_q.count, 0); 450 atomic_set(&adapter->resp_q.count, 0);
447} 451}
@@ -479,7 +483,6 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
479 /* set index of first avalable SBALS / number of available SBALS */ 483 /* set index of first avalable SBALS / number of available SBALS */
480 adapter->req_q.first = 0; 484 adapter->req_q.first = 0;
481 atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q); 485 atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
482 adapter->req_q_pci_batch = 0;
483 486
484 return 0; 487 return 0;
485 488
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 9dc42a68fbdd..58201e1ae478 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Interface to Linux SCSI midlayer. 4 * Interface to Linux SCSI midlayer.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2008 6 * Copyright IBM Corporation 2002, 2009
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -27,9 +27,7 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
27static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) 27static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
28{ 28{
29 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 29 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
30 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
31 unit->device = NULL; 30 unit->device = NULL;
32 zfcp_erp_unit_failed(unit, 12, NULL);
33 zfcp_unit_put(unit); 31 zfcp_unit_put(unit);
34} 32}
35 33
@@ -58,8 +56,8 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
58{ 56{
59 struct zfcp_unit *unit; 57 struct zfcp_unit *unit;
60 struct zfcp_adapter *adapter; 58 struct zfcp_adapter *adapter;
61 int status; 59 int status, scsi_result, ret;
62 int ret; 60 struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
63 61
64 /* reset the status for this request */ 62 /* reset the status for this request */
65 scpnt->result = 0; 63 scpnt->result = 0;
@@ -81,6 +79,14 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
81 return 0; 79 return 0;
82 } 80 }
83 81
82 scsi_result = fc_remote_port_chkready(rport);
83 if (unlikely(scsi_result)) {
84 scpnt->result = scsi_result;
85 zfcp_scsi_dbf_event_result("fail", 4, adapter, scpnt, NULL);
86 scpnt->scsi_done(scpnt);
87 return 0;
88 }
89
84 status = atomic_read(&unit->status); 90 status = atomic_read(&unit->status);
85 if (unlikely((status & ZFCP_STATUS_COMMON_ERP_FAILED) || 91 if (unlikely((status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
86 !(status & ZFCP_STATUS_COMMON_RUNNING))) { 92 !(status & ZFCP_STATUS_COMMON_RUNNING))) {
@@ -88,8 +94,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
88 return 0;; 94 return 0;;
89 } 95 }
90 96
91 ret = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, 0, 97 ret = zfcp_fsf_send_fcp_command_task(unit, scpnt);
92 ZFCP_REQ_AUTO_CLEANUP);
93 if (unlikely(ret == -EBUSY)) 98 if (unlikely(ret == -EBUSY))
94 return SCSI_MLQUEUE_DEVICE_BUSY; 99 return SCSI_MLQUEUE_DEVICE_BUSY;
95 else if (unlikely(ret < 0)) 100 else if (unlikely(ret < 0))
@@ -133,8 +138,7 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdp)
133 138
134 read_lock_irqsave(&zfcp_data.config_lock, flags); 139 read_lock_irqsave(&zfcp_data.config_lock, flags);
135 unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun); 140 unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun);
136 if (unit && 141 if (unit) {
137 (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_REGISTERED)) {
138 sdp->hostdata = unit; 142 sdp->hostdata = unit;
139 unit->device = sdp; 143 unit->device = sdp;
140 zfcp_unit_get(unit); 144 zfcp_unit_get(unit);
@@ -147,79 +151,91 @@ out:
147 151
148static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) 152static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
149{ 153{
150 struct Scsi_Host *scsi_host; 154 struct Scsi_Host *scsi_host = scpnt->device->host;
151 struct zfcp_adapter *adapter; 155 struct zfcp_adapter *adapter =
152 struct zfcp_unit *unit; 156 (struct zfcp_adapter *) scsi_host->hostdata[0];
153 struct zfcp_fsf_req *fsf_req; 157 struct zfcp_unit *unit = scpnt->device->hostdata;
158 struct zfcp_fsf_req *old_req, *abrt_req;
154 unsigned long flags; 159 unsigned long flags;
155 unsigned long old_req_id = (unsigned long) scpnt->host_scribble; 160 unsigned long old_req_id = (unsigned long) scpnt->host_scribble;
156 int retval = SUCCESS; 161 int retval = SUCCESS;
157 162 int retry = 3;
158 scsi_host = scpnt->device->host;
159 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
160 unit = scpnt->device->hostdata;
161 163
162 /* avoid race condition between late normal completion and abort */ 164 /* avoid race condition between late normal completion and abort */
163 write_lock_irqsave(&adapter->abort_lock, flags); 165 write_lock_irqsave(&adapter->abort_lock, flags);
164 166
165 /* Check whether corresponding fsf_req is still pending */
166 spin_lock(&adapter->req_list_lock); 167 spin_lock(&adapter->req_list_lock);
167 fsf_req = zfcp_reqlist_find(adapter, old_req_id); 168 old_req = zfcp_reqlist_find(adapter, old_req_id);
168 spin_unlock(&adapter->req_list_lock); 169 spin_unlock(&adapter->req_list_lock);
169 if (!fsf_req) { 170 if (!old_req) {
170 write_unlock_irqrestore(&adapter->abort_lock, flags); 171 write_unlock_irqrestore(&adapter->abort_lock, flags);
171 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, 0); 172 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL,
172 return retval; 173 old_req_id);
174 return SUCCESS;
173 } 175 }
174 fsf_req->data = NULL; 176 old_req->data = NULL;
175 177
176 /* don't access old fsf_req after releasing the abort_lock */ 178 /* don't access old fsf_req after releasing the abort_lock */
177 write_unlock_irqrestore(&adapter->abort_lock, flags); 179 write_unlock_irqrestore(&adapter->abort_lock, flags);
178 180
179 fsf_req = zfcp_fsf_abort_fcp_command(old_req_id, adapter, unit, 0); 181 while (retry--) {
180 if (!fsf_req) { 182 abrt_req = zfcp_fsf_abort_fcp_command(old_req_id, unit);
181 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, 183 if (abrt_req)
182 old_req_id); 184 break;
183 retval = FAILED; 185
184 return retval; 186 zfcp_erp_wait(adapter);
187 if (!(atomic_read(&adapter->status) &
188 ZFCP_STATUS_COMMON_RUNNING)) {
189 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
190 old_req_id);
191 return SUCCESS;
192 }
185 } 193 }
194 if (!abrt_req)
195 return FAILED;
186 196
187 __wait_event(fsf_req->completion_wq, 197 wait_event(abrt_req->completion_wq,
188 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 198 abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
189 199
190 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { 200 if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
191 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, fsf_req, 0); 201 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, abrt_req, 0);
192 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { 202 else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
193 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, fsf_req, 0); 203 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, abrt_req, 0);
194 } else { 204 else {
195 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, fsf_req, 0); 205 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, abrt_req, 0);
196 retval = FAILED; 206 retval = FAILED;
197 } 207 }
198 zfcp_fsf_req_free(fsf_req); 208 zfcp_fsf_req_free(abrt_req);
199
200 return retval; 209 return retval;
201} 210}
202 211
203static int zfcp_task_mgmt_function(struct zfcp_unit *unit, u8 tm_flags, 212static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
204 struct scsi_cmnd *scpnt)
205{ 213{
214 struct zfcp_unit *unit = scpnt->device->hostdata;
206 struct zfcp_adapter *adapter = unit->port->adapter; 215 struct zfcp_adapter *adapter = unit->port->adapter;
207 struct zfcp_fsf_req *fsf_req; 216 struct zfcp_fsf_req *fsf_req;
208 int retval = SUCCESS; 217 int retval = SUCCESS;
209 218 int retry = 3;
210 /* issue task management function */ 219
211 fsf_req = zfcp_fsf_send_fcp_ctm(adapter, unit, tm_flags, 0); 220 while (retry--) {
212 if (!fsf_req) { 221 fsf_req = zfcp_fsf_send_fcp_ctm(unit, tm_flags);
213 zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, scpnt); 222 if (fsf_req)
214 return FAILED; 223 break;
224
225 zfcp_erp_wait(adapter);
226 if (!(atomic_read(&adapter->status) &
227 ZFCP_STATUS_COMMON_RUNNING)) {
228 zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit,
229 scpnt);
230 return SUCCESS;
231 }
215 } 232 }
233 if (!fsf_req)
234 return FAILED;
216 235
217 __wait_event(fsf_req->completion_wq, 236 wait_event(fsf_req->completion_wq,
218 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 237 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
219 238
220 /*
221 * check completion status of task management function
222 */
223 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { 239 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
224 zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt); 240 zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt);
225 retval = FAILED; 241 retval = FAILED;
@@ -230,40 +246,25 @@ static int zfcp_task_mgmt_function(struct zfcp_unit *unit, u8 tm_flags,
230 zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt); 246 zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt);
231 247
232 zfcp_fsf_req_free(fsf_req); 248 zfcp_fsf_req_free(fsf_req);
233
234 return retval; 249 return retval;
235} 250}
236 251
237static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) 252static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
238{ 253{
239 struct zfcp_unit *unit = scpnt->device->hostdata; 254 return zfcp_task_mgmt_function(scpnt, FCP_LOGICAL_UNIT_RESET);
240
241 if (!unit) {
242 WARN_ON(1);
243 return SUCCESS;
244 }
245 return zfcp_task_mgmt_function(unit, FCP_LOGICAL_UNIT_RESET, scpnt);
246} 255}
247 256
248static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt) 257static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
249{ 258{
250 struct zfcp_unit *unit = scpnt->device->hostdata; 259 return zfcp_task_mgmt_function(scpnt, FCP_TARGET_RESET);
251
252 if (!unit) {
253 WARN_ON(1);
254 return SUCCESS;
255 }
256 return zfcp_task_mgmt_function(unit, FCP_TARGET_RESET, scpnt);
257} 260}
258 261
259static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 262static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
260{ 263{
261 struct zfcp_unit *unit; 264 struct zfcp_unit *unit = scpnt->device->hostdata;
262 struct zfcp_adapter *adapter; 265 struct zfcp_adapter *adapter = unit->port->adapter;
263 266
264 unit = scpnt->device->hostdata; 267 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
265 adapter = unit->port->adapter;
266 zfcp_erp_adapter_reopen(adapter, 0, 141, scpnt);
267 zfcp_erp_wait(adapter); 268 zfcp_erp_wait(adapter);
268 269
269 return SUCCESS; 270 return SUCCESS;
@@ -479,6 +480,109 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
479 rport->dev_loss_tmo = timeout; 480 rport->dev_loss_tmo = timeout;
480} 481}
481 482
483/**
484 * zfcp_scsi_dev_loss_tmo_callbk - Free any reference to rport
485 * @rport: The rport that is about to be deleted.
486 */
487static void zfcp_scsi_dev_loss_tmo_callbk(struct fc_rport *rport)
488{
489 struct zfcp_port *port = rport->dd_data;
490
491 write_lock_irq(&zfcp_data.config_lock);
492 port->rport = NULL;
493 write_unlock_irq(&zfcp_data.config_lock);
494}
495
496/**
497 * zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport
498 * @rport: The FC rport where to teminate I/O
499 *
500 * Abort all pending SCSI commands for a port by closing the
501 * port. Using a reopen for avoids a conflict with a shutdown
502 * overwriting a reopen.
503 */
504static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
505{
506 struct zfcp_port *port = rport->dd_data;
507
508 zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
509}
510
511static void zfcp_scsi_rport_register(struct zfcp_port *port)
512{
513 struct fc_rport_identifiers ids;
514 struct fc_rport *rport;
515
516 ids.node_name = port->wwnn;
517 ids.port_name = port->wwpn;
518 ids.port_id = port->d_id;
519 ids.roles = FC_RPORT_ROLE_FCP_TARGET;
520
521 rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
522 if (!rport) {
523 dev_err(&port->adapter->ccw_device->dev,
524 "Registering port 0x%016Lx failed\n",
525 (unsigned long long)port->wwpn);
526 return;
527 }
528
529 rport->dd_data = port;
530 rport->maxframe_size = port->maxframe_size;
531 rport->supported_classes = port->supported_classes;
532 port->rport = rport;
533}
534
535static void zfcp_scsi_rport_block(struct zfcp_port *port)
536{
537 if (port->rport)
538 fc_remote_port_delete(port->rport);
539}
540
541void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
542{
543 zfcp_port_get(port);
544 port->rport_task = RPORT_ADD;
545
546 if (!queue_work(zfcp_data.work_queue, &port->rport_work))
547 zfcp_port_put(port);
548}
549
550void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
551{
552 zfcp_port_get(port);
553 port->rport_task = RPORT_DEL;
554
555 if (!queue_work(zfcp_data.work_queue, &port->rport_work))
556 zfcp_port_put(port);
557}
558
559void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
560{
561 struct zfcp_port *port;
562
563 list_for_each_entry(port, &adapter->port_list_head, list)
564 zfcp_scsi_schedule_rport_block(port);
565}
566
567void zfcp_scsi_rport_work(struct work_struct *work)
568{
569 struct zfcp_port *port = container_of(work, struct zfcp_port,
570 rport_work);
571
572 while (port->rport_task) {
573 if (port->rport_task == RPORT_ADD) {
574 port->rport_task = RPORT_NONE;
575 zfcp_scsi_rport_register(port);
576 } else {
577 port->rport_task = RPORT_NONE;
578 zfcp_scsi_rport_block(port);
579 }
580 }
581
582 zfcp_port_put(port);
583}
584
585
482struct fc_function_template zfcp_transport_functions = { 586struct fc_function_template zfcp_transport_functions = {
483 .show_starget_port_id = 1, 587 .show_starget_port_id = 1,
484 .show_starget_port_name = 1, 588 .show_starget_port_name = 1,
@@ -497,6 +601,8 @@ struct fc_function_template zfcp_transport_functions = {
497 .reset_fc_host_stats = zfcp_reset_fc_host_stats, 601 .reset_fc_host_stats = zfcp_reset_fc_host_stats,
498 .set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo, 602 .set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
499 .get_host_port_state = zfcp_get_host_port_state, 603 .get_host_port_state = zfcp_get_host_port_state,
604 .dev_loss_tmo_callbk = zfcp_scsi_dev_loss_tmo_callbk,
605 .terminate_rport_io = zfcp_scsi_terminate_rport_io,
500 .show_host_port_state = 1, 606 .show_host_port_state = 1,
501 /* no functions registered for following dynamic attributes but 607 /* no functions registered for following dynamic attributes but
502 directly set by LLDD */ 608 directly set by LLDD */
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 899af2b45b1e..9a3b8e261c0a 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -112,9 +112,9 @@ static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
112 zfcp_sysfs_##_feat##_failed_show, \ 112 zfcp_sysfs_##_feat##_failed_show, \
113 zfcp_sysfs_##_feat##_failed_store); 113 zfcp_sysfs_##_feat##_failed_store);
114 114
115ZFCP_SYSFS_FAILED(zfcp_adapter, adapter, adapter, 44, 93); 115ZFCP_SYSFS_FAILED(zfcp_adapter, adapter, adapter, "syafai1", "syafai2");
116ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, 45, 96); 116ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2");
117ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, 46, 97); 117ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2");
118 118
119static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev, 119static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
120 struct device_attribute *attr, 120 struct device_attribute *attr,
@@ -168,7 +168,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
168 goto out; 168 goto out;
169 } 169 }
170 170
171 zfcp_erp_port_shutdown(port, 0, 92, NULL); 171 zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
172 zfcp_erp_wait(adapter); 172 zfcp_erp_wait(adapter);
173 zfcp_port_put(port); 173 zfcp_port_put(port);
174 zfcp_port_dequeue(port); 174 zfcp_port_dequeue(port);
@@ -222,7 +222,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
222 222
223 retval = 0; 223 retval = 0;
224 224
225 zfcp_erp_unit_reopen(unit, 0, 94, NULL); 225 zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL);
226 zfcp_erp_wait(unit->port->adapter); 226 zfcp_erp_wait(unit->port->adapter);
227 zfcp_unit_put(unit); 227 zfcp_unit_put(unit);
228out: 228out:
@@ -268,7 +268,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
268 goto out; 268 goto out;
269 } 269 }
270 270
271 zfcp_erp_unit_shutdown(unit, 0, 95, NULL); 271 zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
272 zfcp_erp_wait(unit->port->adapter); 272 zfcp_erp_wait(unit->port->adapter);
273 zfcp_unit_put(unit); 273 zfcp_unit_put(unit);
274 zfcp_unit_dequeue(unit); 274 zfcp_unit_dequeue(unit);
@@ -318,10 +318,9 @@ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
318 struct zfcp_unit *unit = sdev->hostdata; \ 318 struct zfcp_unit *unit = sdev->hostdata; \
319 struct zfcp_latencies *lat = &unit->latencies; \ 319 struct zfcp_latencies *lat = &unit->latencies; \
320 struct zfcp_adapter *adapter = unit->port->adapter; \ 320 struct zfcp_adapter *adapter = unit->port->adapter; \
321 unsigned long flags; \
322 unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \ 321 unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \
323 \ 322 \
324 spin_lock_irqsave(&lat->lock, flags); \ 323 spin_lock_bh(&lat->lock); \
325 fsum = lat->_name.fabric.sum * adapter->timer_ticks; \ 324 fsum = lat->_name.fabric.sum * adapter->timer_ticks; \
326 fmin = lat->_name.fabric.min * adapter->timer_ticks; \ 325 fmin = lat->_name.fabric.min * adapter->timer_ticks; \
327 fmax = lat->_name.fabric.max * adapter->timer_ticks; \ 326 fmax = lat->_name.fabric.max * adapter->timer_ticks; \
@@ -329,7 +328,7 @@ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
329 cmin = lat->_name.channel.min * adapter->timer_ticks; \ 328 cmin = lat->_name.channel.min * adapter->timer_ticks; \
330 cmax = lat->_name.channel.max * adapter->timer_ticks; \ 329 cmax = lat->_name.channel.max * adapter->timer_ticks; \
331 cc = lat->_name.counter; \ 330 cc = lat->_name.counter; \
332 spin_unlock_irqrestore(&lat->lock, flags); \ 331 spin_unlock_bh(&lat->lock); \
333 \ 332 \
334 do_div(fsum, 1000); \ 333 do_div(fsum, 1000); \
335 do_div(fmin, 1000); \ 334 do_div(fmin, 1000); \
@@ -487,7 +486,8 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
487 struct zfcp_adapter *adapter = 486 struct zfcp_adapter *adapter =
488 (struct zfcp_adapter *) scsi_host->hostdata[0]; 487 (struct zfcp_adapter *) scsi_host->hostdata[0];
489 488
490 return sprintf(buf, "%d\n", atomic_read(&adapter->qdio_outb_full)); 489 return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full),
490 (unsigned long long)adapter->req_q_util);
491} 491}
492static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL); 492static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
493 493
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 5311317c2e4c..a12783ebb42d 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -4,7 +4,7 @@
4 Written By: Adam Radford <linuxraid@amcc.com> 4 Written By: Adam Radford <linuxraid@amcc.com>
5 Modifications By: Tom Couch <linuxraid@amcc.com> 5 Modifications By: Tom Couch <linuxraid@amcc.com>
6 6
7 Copyright (C) 2004-2008 Applied Micro Circuits Corporation. 7 Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
8 8
9 This program is free software; you can redistribute it and/or modify 9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by 10 it under the terms of the GNU General Public License as published by
@@ -75,6 +75,7 @@
75 Add MSI support and "use_msi" module parameter. 75 Add MSI support and "use_msi" module parameter.
76 Fix bug in twa_get_param() on 4GB+. 76 Fix bug in twa_get_param() on 4GB+.
77 Use pci_resource_len() for ioremap(). 77 Use pci_resource_len() for ioremap().
78 2.26.02.012 - Add power management support.
78*/ 79*/
79 80
80#include <linux/module.h> 81#include <linux/module.h>
@@ -99,7 +100,7 @@
99#include "3w-9xxx.h" 100#include "3w-9xxx.h"
100 101
101/* Globals */ 102/* Globals */
102#define TW_DRIVER_VERSION "2.26.02.011" 103#define TW_DRIVER_VERSION "2.26.02.012"
103static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; 104static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
104static unsigned int twa_device_extension_count; 105static unsigned int twa_device_extension_count;
105static int twa_major = -1; 106static int twa_major = -1;
@@ -2182,6 +2183,98 @@ static void twa_remove(struct pci_dev *pdev)
2182 twa_device_extension_count--; 2183 twa_device_extension_count--;
2183} /* End twa_remove() */ 2184} /* End twa_remove() */
2184 2185
2186#ifdef CONFIG_PM
2187/* This function is called on PCI suspend */
2188static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
2189{
2190 struct Scsi_Host *host = pci_get_drvdata(pdev);
2191 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2192
2193 printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2194
2195 TW_DISABLE_INTERRUPTS(tw_dev);
2196 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2197
2198 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2199 pci_disable_msi(pdev);
2200
2201 /* Tell the card we are shutting down */
2202 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2203 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2204 } else {
2205 printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2206 }
2207 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2208
2209 pci_save_state(pdev);
2210 pci_disable_device(pdev);
2211 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2212
2213 return 0;
2214} /* End twa_suspend() */
2215
2216/* This function is called on PCI resume */
2217static int twa_resume(struct pci_dev *pdev)
2218{
2219 int retval = 0;
2220 struct Scsi_Host *host = pci_get_drvdata(pdev);
2221 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2222
2223 printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2224 pci_set_power_state(pdev, PCI_D0);
2225 pci_enable_wake(pdev, PCI_D0, 0);
2226 pci_restore_state(pdev);
2227
2228 retval = pci_enable_device(pdev);
2229 if (retval) {
2230 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
2231 return retval;
2232 }
2233
2234 pci_set_master(pdev);
2235 pci_try_set_mwi(pdev);
2236
2237 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)
2238 || pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
2239 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)
2240 || pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
2241 TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2242 retval = -ENODEV;
2243 goto out_disable_device;
2244 }
2245
2246 /* Initialize the card */
2247 if (twa_reset_sequence(tw_dev, 0)) {
2248 retval = -ENODEV;
2249 goto out_disable_device;
2250 }
2251
2252 /* Now setup the interrupt handler */
2253 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2254 if (retval) {
2255 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2256 retval = -ENODEV;
2257 goto out_disable_device;
2258 }
2259
2260 /* Now enable MSI if enabled */
2261 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2262 pci_enable_msi(pdev);
2263
2264 /* Re-enable interrupts on the card */
2265 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2266
2267 printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2268 return 0;
2269
2270out_disable_device:
2271 scsi_remove_host(host);
2272 pci_disable_device(pdev);
2273
2274 return retval;
2275} /* End twa_resume() */
2276#endif
2277
2185/* PCI Devices supported by this driver */ 2278/* PCI Devices supported by this driver */
2186static struct pci_device_id twa_pci_tbl[] __devinitdata = { 2279static struct pci_device_id twa_pci_tbl[] __devinitdata = {
2187 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000, 2280 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
@@ -2202,6 +2295,10 @@ static struct pci_driver twa_driver = {
2202 .id_table = twa_pci_tbl, 2295 .id_table = twa_pci_tbl,
2203 .probe = twa_probe, 2296 .probe = twa_probe,
2204 .remove = twa_remove, 2297 .remove = twa_remove,
2298#ifdef CONFIG_PM
2299 .suspend = twa_suspend,
2300 .resume = twa_resume,
2301#endif
2205 .shutdown = twa_shutdown 2302 .shutdown = twa_shutdown
2206}; 2303};
2207 2304
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 1729a8785fea..2893eec78ed2 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -4,7 +4,7 @@
4 Written By: Adam Radford <linuxraid@amcc.com> 4 Written By: Adam Radford <linuxraid@amcc.com>
5 Modifications By: Tom Couch <linuxraid@amcc.com> 5 Modifications By: Tom Couch <linuxraid@amcc.com>
6 6
7 Copyright (C) 2004-2008 Applied Micro Circuits Corporation. 7 Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
8 8
9 This program is free software; you can redistribute it and/or modify 9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by 10 it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 256c7bec7bd7..e2f44e6c0bcb 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -224,14 +224,15 @@ config SCSI_LOGGING
224 can enable logging by saying Y to "/proc file system support" and 224 can enable logging by saying Y to "/proc file system support" and
225 "Sysctl support" below and executing the command 225 "Sysctl support" below and executing the command
226 226
227 echo "scsi log token [level]" > /proc/scsi/scsi 227 echo <bitmask> > /proc/sys/dev/scsi/logging_level
228 228
229 at boot time after the /proc file system has been mounted. 229 where <bitmask> is a four byte value representing the logging type
230 and logging level for each type of logging selected.
230 231
231 There are a number of things that can be used for 'token' (you can 232 There are a number of logging types and you can find them in the
232 find them in the source: <file:drivers/scsi/scsi.c>), and this 233 source at <file:drivers/scsi/scsi_logging.h>. The logging levels
233 allows you to select the types of information you want, and the 234 are also described in that file and they determine the verbosity of
234 level allows you to select the level of verbosity. 235 the logging for each logging type.
235 236
236 If you say N here, it may be harder to track down some types of SCSI 237 If you say N here, it may be harder to track down some types of SCSI
237 problems. If you say Y here your kernel will be somewhat larger, but 238 problems. If you say Y here your kernel will be somewhat larger, but
@@ -570,6 +571,7 @@ config SCSI_ARCMSR_AER
570 To enable this function, choose Y here. 571 To enable this function, choose Y here.
571 572
572source "drivers/scsi/megaraid/Kconfig.megaraid" 573source "drivers/scsi/megaraid/Kconfig.megaraid"
574source "drivers/scsi/mpt2sas/Kconfig"
573 575
574config SCSI_HPTIOP 576config SCSI_HPTIOP
575 tristate "HighPoint RocketRAID 3xxx/4xxx Controller support" 577 tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
@@ -608,6 +610,7 @@ config SCSI_FLASHPOINT
608config LIBFC 610config LIBFC
609 tristate "LibFC module" 611 tristate "LibFC module"
610 select SCSI_FC_ATTRS 612 select SCSI_FC_ATTRS
613 select CRC32
611 ---help--- 614 ---help---
612 Fibre Channel library module 615 Fibre Channel library module
613 616
@@ -1535,6 +1538,7 @@ config SCSI_NSP32
1535config SCSI_DEBUG 1538config SCSI_DEBUG
1536 tristate "SCSI debugging host simulator" 1539 tristate "SCSI debugging host simulator"
1537 depends on SCSI 1540 depends on SCSI
1541 select CRC_T10DIF
1538 help 1542 help
1539 This is a host adapter simulator that can simulate multiple hosts 1543 This is a host adapter simulator that can simulate multiple hosts
1540 each with multiple dummy SCSI devices (disks). It defaults to one 1544 each with multiple dummy SCSI devices (disks). It defaults to one
@@ -1803,4 +1807,6 @@ source "drivers/scsi/pcmcia/Kconfig"
1803 1807
1804source "drivers/scsi/device_handler/Kconfig" 1808source "drivers/scsi/device_handler/Kconfig"
1805 1809
1810source "drivers/scsi/osd/Kconfig"
1811
1806endmenu 1812endmenu
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 7461eb09a031..cf7929634668 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_SCSI_DC390T) += tmscsim.o
99obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o 99obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
100obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ 100obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
101obj-$(CONFIG_MEGARAID_SAS) += megaraid/ 101obj-$(CONFIG_MEGARAID_SAS) += megaraid/
102obj-$(CONFIG_SCSI_MPT2SAS) += mpt2sas/
102obj-$(CONFIG_SCSI_ACARD) += atp870u.o 103obj-$(CONFIG_SCSI_ACARD) += atp870u.o
103obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o 104obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
104obj-$(CONFIG_SCSI_GDTH) += gdth.o 105obj-$(CONFIG_SCSI_GDTH) += gdth.o
@@ -137,6 +138,8 @@ obj-$(CONFIG_CHR_DEV_SG) += sg.o
137obj-$(CONFIG_CHR_DEV_SCH) += ch.o 138obj-$(CONFIG_CHR_DEV_SCH) += ch.o
138obj-$(CONFIG_SCSI_ENCLOSURE) += ses.o 139obj-$(CONFIG_SCSI_ENCLOSURE) += ses.o
139 140
141obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/
142
140# This goes last, so that "real" scsi devices probe earlier 143# This goes last, so that "real" scsi devices probe earlier
141obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o 144obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
142 145
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index af9725409f43..7b1633a8c15a 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -41,6 +41,7 @@ MODULE_DESCRIPTION("device driver for scsi media changer devices");
41MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org>"); 41MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org>");
42MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
43MODULE_ALIAS_CHARDEV_MAJOR(SCSI_CHANGER_MAJOR); 43MODULE_ALIAS_CHARDEV_MAJOR(SCSI_CHANGER_MAJOR);
44MODULE_ALIAS_SCSI_DEVICE(TYPE_MEDIUM_CHANGER);
44 45
45static int init = 1; 46static int init = 1;
46module_param(init, int, 0444); 47module_param(init, int, 0444);
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 4003deefb7d8..e79e18101f87 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1373,21 +1373,14 @@ static const char * const driverbyte_table[]={
1373"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"}; 1373"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"};
1374#define NUM_DRIVERBYTE_STRS ARRAY_SIZE(driverbyte_table) 1374#define NUM_DRIVERBYTE_STRS ARRAY_SIZE(driverbyte_table)
1375 1375
1376static const char * const driversuggest_table[]={"SUGGEST_OK",
1377"SUGGEST_RETRY", "SUGGEST_ABORT", "SUGGEST_REMAP", "SUGGEST_DIE",
1378"SUGGEST_5", "SUGGEST_6", "SUGGEST_7", "SUGGEST_SENSE"};
1379#define NUM_SUGGEST_STRS ARRAY_SIZE(driversuggest_table)
1380
1381void scsi_show_result(int result) 1376void scsi_show_result(int result)
1382{ 1377{
1383 int hb = host_byte(result); 1378 int hb = host_byte(result);
1384 int db = (driver_byte(result) & DRIVER_MASK); 1379 int db = driver_byte(result);
1385 int su = ((driver_byte(result) & SUGGEST_MASK) >> 4);
1386 1380
1387 printk("Result: hostbyte=%s driverbyte=%s,%s\n", 1381 printk("Result: hostbyte=%s driverbyte=%s\n",
1388 (hb < NUM_HOSTBYTE_STRS ? hostbyte_table[hb] : "invalid"), 1382 (hb < NUM_HOSTBYTE_STRS ? hostbyte_table[hb] : "invalid"),
1389 (db < NUM_DRIVERBYTE_STRS ? driverbyte_table[db] : "invalid"), 1383 (db < NUM_DRIVERBYTE_STRS ? driverbyte_table[db] : "invalid"));
1390 (su < NUM_SUGGEST_STRS ? driversuggest_table[su] : "invalid"));
1391} 1384}
1392 1385
1393#else 1386#else
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
index a83d36e4926f..4eb6f5593b3e 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -196,7 +196,7 @@ static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx,
196} 196}
197 197
198/** 198/**
199 * cxgb3i_ddp_find_page_index - return ddp page index for a given page size. 199 * cxgb3i_ddp_find_page_index - return ddp page index for a given page size
200 * @pgsz: page size 200 * @pgsz: page size
201 * return the ddp page index, if no match is found return DDP_PGIDX_MAX. 201 * return the ddp page index, if no match is found return DDP_PGIDX_MAX.
202 */ 202 */
@@ -355,8 +355,7 @@ EXPORT_SYMBOL_GPL(cxgb3i_ddp_release_gl);
355 * @tdev: t3cdev adapter 355 * @tdev: t3cdev adapter
356 * @tid: connection id 356 * @tid: connection id
357 * @tformat: tag format 357 * @tformat: tag format
358 * @tagp: the s/w tag, if ddp setup is successful, it will be updated with 358 * @tagp: contains s/w tag initially, will be updated with ddp/hw tag
359 * ddp/hw tag
360 * @gl: the page momory list 359 * @gl: the page momory list
361 * @gfp: allocation mode 360 * @gfp: allocation mode
362 * 361 *
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
index 3faae7831c83..75a63a81e873 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
@@ -185,12 +185,11 @@ static inline int cxgb3i_is_ddp_tag(struct cxgb3i_tag_format *tformat, u32 tag)
185} 185}
186 186
187/** 187/**
188 * cxgb3i_sw_tag_usable - check if a given s/w tag has enough bits left for 188 * cxgb3i_sw_tag_usable - check if s/w tag has enough bits left for hw bits
189 * the reserved/hw bits
190 * @tformat: tag format information 189 * @tformat: tag format information
191 * @sw_tag: s/w tag to be checked 190 * @sw_tag: s/w tag to be checked
192 * 191 *
193 * return true if the tag is a ddp tag, false otherwise. 192 * return true if the tag can be used for hw ddp tag, false otherwise.
194 */ 193 */
195static inline int cxgb3i_sw_tag_usable(struct cxgb3i_tag_format *tformat, 194static inline int cxgb3i_sw_tag_usable(struct cxgb3i_tag_format *tformat,
196 u32 sw_tag) 195 u32 sw_tag)
@@ -222,8 +221,7 @@ static inline u32 cxgb3i_set_non_ddp_tag(struct cxgb3i_tag_format *tformat,
222} 221}
223 222
224/** 223/**
225 * cxgb3i_ddp_tag_base - shift the s/w tag bits so that reserved bits are not 224 * cxgb3i_ddp_tag_base - shift s/w tag bits so that reserved bits are not used
226 * used.
227 * @tformat: tag format information 225 * @tformat: tag format information
228 * @sw_tag: s/w tag to be checked 226 * @sw_tag: s/w tag to be checked
229 */ 227 */
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index fa2a44f37b36..e185dedc4c1f 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -101,8 +101,7 @@ free_snic:
101} 101}
102 102
103/** 103/**
104 * cxgb3i_adapter_remove - release all the resources held and cleanup any 104 * cxgb3i_adapter_remove - release the resources held and cleanup h/w settings
105 * h/w settings
106 * @t3dev: t3cdev adapter 105 * @t3dev: t3cdev adapter
107 */ 106 */
108void cxgb3i_adapter_remove(struct t3cdev *t3dev) 107void cxgb3i_adapter_remove(struct t3cdev *t3dev)
@@ -135,8 +134,7 @@ void cxgb3i_adapter_remove(struct t3cdev *t3dev)
135} 134}
136 135
137/** 136/**
138 * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure with a given 137 * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device
139 * net_device
140 * @t3dev: t3cdev adapter 138 * @t3dev: t3cdev adapter
141 */ 139 */
142struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) 140struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
@@ -170,8 +168,7 @@ struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *snic,
170 int err; 168 int err;
171 169
172 shost = iscsi_host_alloc(&cxgb3i_host_template, 170 shost = iscsi_host_alloc(&cxgb3i_host_template,
173 sizeof(struct cxgb3i_hba), 171 sizeof(struct cxgb3i_hba), 1);
174 CXGB3I_SCSI_QDEPTH_DFLT);
175 if (!shost) { 172 if (!shost) {
176 cxgb3i_log_info("iscsi_host_alloc failed.\n"); 173 cxgb3i_log_info("iscsi_host_alloc failed.\n");
177 return NULL; 174 return NULL;
@@ -335,13 +332,12 @@ static void cxgb3i_ep_disconnect(struct iscsi_endpoint *ep)
335 * @cmds_max: max # of commands 332 * @cmds_max: max # of commands
336 * @qdepth: scsi queue depth 333 * @qdepth: scsi queue depth
337 * @initial_cmdsn: initial iscsi CMDSN for this session 334 * @initial_cmdsn: initial iscsi CMDSN for this session
338 * @host_no: pointer to return host no
339 * 335 *
340 * Creates a new iSCSI session 336 * Creates a new iSCSI session
341 */ 337 */
342static struct iscsi_cls_session * 338static struct iscsi_cls_session *
343cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth, 339cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth,
344 u32 initial_cmdsn, u32 *host_no) 340 u32 initial_cmdsn)
345{ 341{
346 struct cxgb3i_endpoint *cep; 342 struct cxgb3i_endpoint *cep;
347 struct cxgb3i_hba *hba; 343 struct cxgb3i_hba *hba;
@@ -360,8 +356,6 @@ cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth,
360 cxgb3i_api_debug("ep 0x%p, cep 0x%p, hba 0x%p.\n", ep, cep, hba); 356 cxgb3i_api_debug("ep 0x%p, cep 0x%p, hba 0x%p.\n", ep, cep, hba);
361 BUG_ON(hba != iscsi_host_priv(shost)); 357 BUG_ON(hba != iscsi_host_priv(shost));
362 358
363 *host_no = shost->host_no;
364
365 cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost, 359 cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost,
366 cmds_max, 360 cmds_max,
367 sizeof(struct iscsi_tcp_task) + 361 sizeof(struct iscsi_tcp_task) +
@@ -394,9 +388,9 @@ static void cxgb3i_session_destroy(struct iscsi_cls_session *cls_session)
394} 388}
395 389
396/** 390/**
397 * cxgb3i_conn_max_xmit_dlength -- check the max. xmit pdu segment size, 391 * cxgb3i_conn_max_xmit_dlength -- calc the max. xmit pdu segment size
398 * reduce it to be within the hardware limit if needed
399 * @conn: iscsi connection 392 * @conn: iscsi connection
393 * check the max. xmit pdu payload, reduce it if needed
400 */ 394 */
401static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn) 395static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn)
402 396
@@ -417,8 +411,7 @@ static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn)
417} 411}
418 412
419/** 413/**
420 * cxgb3i_conn_max_recv_dlength -- check the max. recv pdu segment size against 414 * cxgb3i_conn_max_recv_dlength -- check the max. recv pdu segment size
421 * the hardware limit
422 * @conn: iscsi connection 415 * @conn: iscsi connection
423 * return 0 if the value is valid, < 0 otherwise. 416 * return 0 if the value is valid, < 0 otherwise.
424 */ 417 */
@@ -759,9 +752,9 @@ static void cxgb3i_parse_itt(struct iscsi_conn *conn, itt_t itt,
759 752
760/** 753/**
761 * cxgb3i_reserve_itt - generate tag for a give task 754 * cxgb3i_reserve_itt - generate tag for a give task
762 * Try to set up ddp for a scsi read task.
763 * @task: iscsi task 755 * @task: iscsi task
764 * @hdr_itt: tag, filled in by this function 756 * @hdr_itt: tag, filled in by this function
757 * Set up ddp for scsi read tasks if possible.
765 */ 758 */
766int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) 759int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
767{ 760{
@@ -809,9 +802,9 @@ int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
809 802
810/** 803/**
811 * cxgb3i_release_itt - release the tag for a given task 804 * cxgb3i_release_itt - release the tag for a given task
812 * if the tag is a ddp tag, release the ddp setup
813 * @task: iscsi task 805 * @task: iscsi task
814 * @hdr_itt: tag 806 * @hdr_itt: tag
807 * If the tag is a ddp tag, release the ddp setup
815 */ 808 */
816void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt) 809void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt)
817{ 810{
@@ -843,7 +836,7 @@ static struct scsi_host_template cxgb3i_host_template = {
843 .can_queue = CXGB3I_SCSI_QDEPTH_DFLT - 1, 836 .can_queue = CXGB3I_SCSI_QDEPTH_DFLT - 1,
844 .sg_tablesize = SG_ALL, 837 .sg_tablesize = SG_ALL,
845 .max_sectors = 0xFFFF, 838 .max_sectors = 0xFFFF,
846 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 839 .cmd_per_lun = CXGB3I_SCSI_QDEPTH_DFLT,
847 .eh_abort_handler = iscsi_eh_abort, 840 .eh_abort_handler = iscsi_eh_abort,
848 .eh_device_reset_handler = iscsi_eh_device_reset, 841 .eh_device_reset_handler = iscsi_eh_device_reset,
849 .eh_target_reset_handler = iscsi_eh_target_reset, 842 .eh_target_reset_handler = iscsi_eh_target_reset,
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index de3b3b614cca..c2e434e54e28 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -1417,8 +1417,7 @@ static void c3cn_active_close(struct s3_conn *c3cn)
1417} 1417}
1418 1418
1419/** 1419/**
1420 * cxgb3i_c3cn_release - close and release an iscsi tcp connection and any 1420 * cxgb3i_c3cn_release - close and release an iscsi tcp connection
1421 * resource held
1422 * @c3cn: the iscsi tcp connection 1421 * @c3cn: the iscsi tcp connection
1423 */ 1422 */
1424void cxgb3i_c3cn_release(struct s3_conn *c3cn) 1423void cxgb3i_c3cn_release(struct s3_conn *c3cn)
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
index 6344b9eb2589..275f23f16eb7 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h
@@ -139,6 +139,7 @@ enum c3cn_flags {
139 139
140/** 140/**
141 * cxgb3i_sdev_data - Per adapter data. 141 * cxgb3i_sdev_data - Per adapter data.
142 *
142 * Linked off of each Ethernet device port on the adapter. 143 * Linked off of each Ethernet device port on the adapter.
143 * Also available via the t3cdev structure since we have pointers to our port 144 * Also available via the t3cdev structure since we have pointers to our port
144 * net_device's there ... 145 * net_device's there ...
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
index 17115c230d65..7eebc9a7cb35 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -479,7 +479,7 @@ void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
479 cxgb3i_tx_debug("cn 0x%p.\n", c3cn); 479 cxgb3i_tx_debug("cn 0x%p.\n", c3cn);
480 if (conn) { 480 if (conn) {
481 cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id); 481 cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id);
482 scsi_queue_work(conn->session->host, &conn->xmitwork); 482 iscsi_conn_queue_work(conn);
483 } 483 }
484} 484}
485 485
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index e356b43753ff..dba154c8ff64 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -247,8 +247,8 @@ static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h)
247 /* Prepare the data buffer */ 247 /* Prepare the data buffer */
248 memset(h->buff, 0, stpg_len); 248 memset(h->buff, 0, stpg_len);
249 h->buff[4] = TPGS_STATE_OPTIMIZED & 0x0f; 249 h->buff[4] = TPGS_STATE_OPTIMIZED & 0x0f;
250 h->buff[6] = (h->group_id >> 8) & 0x0f; 250 h->buff[6] = (h->group_id >> 8) & 0xff;
251 h->buff[7] = h->group_id & 0x0f; 251 h->buff[7] = h->group_id & 0xff;
252 252
253 rq = get_alua_req(sdev, h->buff, stpg_len, WRITE); 253 rq = get_alua_req(sdev, h->buff, stpg_len, WRITE);
254 if (!rq) 254 if (!rq)
@@ -461,6 +461,15 @@ static int alua_check_sense(struct scsi_device *sdev,
461 */ 461 */
462 return ADD_TO_MLQUEUE; 462 return ADD_TO_MLQUEUE;
463 } 463 }
464 if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e) {
465 /*
466 * REPORTED_LUNS_DATA_HAS_CHANGED is reported
467 * when switching controllers on targets like
468 * Intel Multi-Flex. We can just retry.
469 */
470 return ADD_TO_MLQUEUE;
471 }
472
464 break; 473 break;
465 } 474 }
466 475
@@ -691,6 +700,7 @@ static const struct scsi_dh_devlist alua_dev_list[] = {
691 {"IBM", "2107900" }, 700 {"IBM", "2107900" },
692 {"IBM", "2145" }, 701 {"IBM", "2145" },
693 {"Pillar", "Axiom" }, 702 {"Pillar", "Axiom" },
703 {"Intel", "Multi-Flex"},
694 {NULL, NULL} 704 {NULL, NULL}
695}; 705};
696 706
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 53664765570a..43b8c51e98d0 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -449,28 +449,40 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
449 unsigned char *sensebuf) 449 unsigned char *sensebuf)
450{ 450{
451 struct scsi_sense_hdr sense_hdr; 451 struct scsi_sense_hdr sense_hdr;
452 int sense, err = SCSI_DH_IO, ret; 452 int err = SCSI_DH_IO, ret;
453 453
454 ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr); 454 ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
455 if (!ret) 455 if (!ret)
456 goto done; 456 goto done;
457 457
458 err = SCSI_DH_OK; 458 err = SCSI_DH_OK;
459 sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) | 459
460 sense_hdr.ascq; 460 switch (sense_hdr.sense_key) {
461 /* If it is retryable failure, submit the c9 inquiry again */ 461 case NO_SENSE:
462 if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 || 462 case ABORTED_COMMAND:
463 sense == 0x62900) { 463 case UNIT_ATTENTION:
464 /* 0x59136 - Command lock contention
465 * 0x[6b]8b02 - Quiesense in progress or achieved
466 * 0x62900 - Power On, Reset, or Bus Device Reset
467 */
468 err = SCSI_DH_RETRY; 464 err = SCSI_DH_RETRY;
465 break;
466 case NOT_READY:
467 if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01)
468 /* LUN Not Ready and is in the Process of Becoming
469 * Ready
470 */
471 err = SCSI_DH_RETRY;
472 break;
473 case ILLEGAL_REQUEST:
474 if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36)
475 /*
476 * Command Lock contention
477 */
478 err = SCSI_DH_RETRY;
479 break;
480 default:
481 sdev_printk(KERN_INFO, sdev,
482 "MODE_SELECT failed with sense %02x/%02x/%02x.\n",
483 sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);
469 } 484 }
470 485
471 if (sense)
472 sdev_printk(KERN_INFO, sdev,
473 "MODE_SELECT failed with sense 0x%x.\n", sense);
474done: 486done:
475 return err; 487 return err;
476} 488}
@@ -562,6 +574,12 @@ static int rdac_check_sense(struct scsi_device *sdev,
562 * Just retry and wait. 574 * Just retry and wait.
563 */ 575 */
564 return ADD_TO_MLQUEUE; 576 return ADD_TO_MLQUEUE;
577 if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02)
578 /* LUN Not Ready - Quiescense in progress
579 * or has been achieved
580 * Just retry.
581 */
582 return ADD_TO_MLQUEUE;
565 break; 583 break;
566 case ILLEGAL_REQUEST: 584 case ILLEGAL_REQUEST:
567 if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) { 585 if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
@@ -579,6 +597,11 @@ static int rdac_check_sense(struct scsi_device *sdev,
579 * Power On, Reset, or Bus Device Reset, just retry. 597 * Power On, Reset, or Bus Device Reset, just retry.
580 */ 598 */
581 return ADD_TO_MLQUEUE; 599 return ADD_TO_MLQUEUE;
600 if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02)
601 /*
602 * Quiescence in progress , just retry.
603 */
604 return ADD_TO_MLQUEUE;
582 break; 605 break;
583 } 606 }
584 /* success just means we do not care what scsi-ml does */ 607 /* success just means we do not care what scsi-ml does */
diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c
index da210eba1941..2bbbe3c0cc7b 100644
--- a/drivers/scsi/fcoe/fcoe_sw.c
+++ b/drivers/scsi/fcoe/fcoe_sw.c
@@ -133,6 +133,13 @@ static int fcoe_sw_lport_config(struct fc_lport *lp)
133 /* lport fc_lport related configuration */ 133 /* lport fc_lport related configuration */
134 fc_lport_config(lp); 134 fc_lport_config(lp);
135 135
136 /* offload related configuration */
137 lp->crc_offload = 0;
138 lp->seq_offload = 0;
139 lp->lro_enabled = 0;
140 lp->lro_xid = 0;
141 lp->lso_max = 0;
142
136 return 0; 143 return 0;
137} 144}
138 145
@@ -186,7 +193,27 @@ static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev)
186 if (fc->real_dev->features & NETIF_F_SG) 193 if (fc->real_dev->features & NETIF_F_SG)
187 lp->sg_supp = 1; 194 lp->sg_supp = 1;
188 195
189 196#ifdef NETIF_F_FCOE_CRC
197 if (netdev->features & NETIF_F_FCOE_CRC) {
198 lp->crc_offload = 1;
199 printk(KERN_DEBUG "fcoe:%s supports FCCRC offload\n",
200 netdev->name);
201 }
202#endif
203#ifdef NETIF_F_FSO
204 if (netdev->features & NETIF_F_FSO) {
205 lp->seq_offload = 1;
206 lp->lso_max = netdev->gso_max_size;
207 printk(KERN_DEBUG "fcoe:%s supports LSO for max len 0x%x\n",
208 netdev->name, lp->lso_max);
209 }
210#endif
211 if (netdev->fcoe_ddp_xid) {
212 lp->lro_enabled = 1;
213 lp->lro_xid = netdev->fcoe_ddp_xid;
214 printk(KERN_DEBUG "fcoe:%s supports LRO for max xid 0x%x\n",
215 netdev->name, lp->lro_xid);
216 }
190 skb_queue_head_init(&fc->fcoe_pending_queue); 217 skb_queue_head_init(&fc->fcoe_pending_queue);
191 fc->fcoe_pending_queue_active = 0; 218 fc->fcoe_pending_queue_active = 0;
192 219
@@ -346,8 +373,46 @@ static int fcoe_sw_destroy(struct net_device *netdev)
346 return 0; 373 return 0;
347} 374}
348 375
376/*
377 * fcoe_sw_ddp_setup - calls LLD's ddp_setup through net_device
378 * @lp: the corresponding fc_lport
379 * @xid: the exchange id for this ddp transfer
380 * @sgl: the scatterlist describing this transfer
381 * @sgc: number of sg items
382 *
383 * Returns : 0 no ddp
384 */
385static int fcoe_sw_ddp_setup(struct fc_lport *lp, u16 xid,
386 struct scatterlist *sgl, unsigned int sgc)
387{
388 struct net_device *n = fcoe_netdev(lp);
389
390 if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup)
391 return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);
392
393 return 0;
394}
395
396/*
397 * fcoe_sw_ddp_done - calls LLD's ddp_done through net_device
398 * @lp: the corresponding fc_lport
399 * @xid: the exchange id for this ddp transfer
400 *
401 * Returns : the length of data that have been completed by ddp
402 */
403static int fcoe_sw_ddp_done(struct fc_lport *lp, u16 xid)
404{
405 struct net_device *n = fcoe_netdev(lp);
406
407 if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done)
408 return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
409 return 0;
410}
411
349static struct libfc_function_template fcoe_sw_libfc_fcn_templ = { 412static struct libfc_function_template fcoe_sw_libfc_fcn_templ = {
350 .frame_send = fcoe_xmit, 413 .frame_send = fcoe_xmit,
414 .ddp_setup = fcoe_sw_ddp_setup,
415 .ddp_done = fcoe_sw_ddp_done,
351}; 416};
352 417
353/** 418/**
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 5548bf3bb58b..0d6f5beb7f9e 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -423,7 +423,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
423 423
424 /* crc offload */ 424 /* crc offload */
425 if (likely(lp->crc_offload)) { 425 if (likely(lp->crc_offload)) {
426 skb->ip_summed = CHECKSUM_COMPLETE; 426 skb->ip_summed = CHECKSUM_PARTIAL;
427 skb->csum_start = skb_headroom(skb); 427 skb->csum_start = skb_headroom(skb);
428 skb->csum_offset = skb->len; 428 skb->csum_offset = skb->len;
429 crc = 0; 429 crc = 0;
@@ -460,7 +460,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
460 skb_reset_mac_header(skb); 460 skb_reset_mac_header(skb);
461 skb_reset_network_header(skb); 461 skb_reset_network_header(skb);
462 skb->mac_len = elen; 462 skb->mac_len = elen;
463 skb->protocol = htons(ETH_P_802_3); 463 skb->protocol = htons(ETH_P_FCOE);
464 skb->dev = fc->real_dev; 464 skb->dev = fc->real_dev;
465 465
466 /* fill up mac and fcoe headers */ 466 /* fill up mac and fcoe headers */
@@ -483,6 +483,16 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
483 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); 483 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
484 hp->fcoe_sof = sof; 484 hp->fcoe_sof = sof;
485 485
486#ifdef NETIF_F_FSO
487 /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
488 if (lp->seq_offload && fr_max_payload(fp)) {
489 skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
490 skb_shinfo(skb)->gso_size = fr_max_payload(fp);
491 } else {
492 skb_shinfo(skb)->gso_type = 0;
493 skb_shinfo(skb)->gso_size = 0;
494 }
495#endif
486 /* update tx stats: regardless if LLD fails */ 496 /* update tx stats: regardless if LLD fails */
487 stats = lp->dev_stats[smp_processor_id()]; 497 stats = lp->dev_stats[smp_processor_id()];
488 if (stats) { 498 if (stats) {
@@ -623,7 +633,7 @@ int fcoe_percpu_receive_thread(void *arg)
623 * it's solicited data, in which case, the FCP layer would 633 * it's solicited data, in which case, the FCP layer would
624 * check it during the copy. 634 * check it during the copy.
625 */ 635 */
626 if (lp->crc_offload) 636 if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
627 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; 637 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
628 else 638 else
629 fr_flags(fp) |= FCPHF_CRC_UNCHECKED; 639 fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index aa670a1d1513..89d41a424b33 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -176,7 +176,6 @@ void scsi_remove_host(struct Scsi_Host *shost)
176 transport_unregister_device(&shost->shost_gendev); 176 transport_unregister_device(&shost->shost_gendev);
177 device_unregister(&shost->shost_dev); 177 device_unregister(&shost->shost_dev);
178 device_del(&shost->shost_gendev); 178 device_del(&shost->shost_gendev);
179 scsi_proc_hostdir_rm(shost->hostt);
180} 179}
181EXPORT_SYMBOL(scsi_remove_host); 180EXPORT_SYMBOL(scsi_remove_host);
182 181
@@ -270,6 +269,8 @@ static void scsi_host_dev_release(struct device *dev)
270 struct Scsi_Host *shost = dev_to_shost(dev); 269 struct Scsi_Host *shost = dev_to_shost(dev);
271 struct device *parent = dev->parent; 270 struct device *parent = dev->parent;
272 271
272 scsi_proc_hostdir_rm(shost->hostt);
273
273 if (shost->ehandler) 274 if (shost->ehandler)
274 kthread_stop(shost->ehandler); 275 kthread_stop(shost->ehandler);
275 if (shost->work_q) 276 if (shost->work_q)
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 34be88d7afa5..af1f0af0c5ac 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -580,8 +580,7 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
580 break; 580 break;
581 581
582 default: 582 default:
583 scp->result = ((DRIVER_INVALID|SUGGEST_ABORT)<<24) | 583 scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
584 (DID_ABORT<<16);
585 break; 584 break;
586 } 585 }
587 586
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ed1e728763a2..93d1fbe4ee5d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2767,6 +2767,40 @@ static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
2767 ibmvfc_init_tgt(tgt, job_step); 2767 ibmvfc_init_tgt(tgt, job_step);
2768} 2768}
2769 2769
2770/* Defined in FC-LS */
2771static const struct {
2772 int code;
2773 int retry;
2774 int logged_in;
2775} prli_rsp [] = {
2776 { 0, 1, 0 },
2777 { 1, 0, 1 },
2778 { 2, 1, 0 },
2779 { 3, 1, 0 },
2780 { 4, 0, 0 },
2781 { 5, 0, 0 },
2782 { 6, 0, 1 },
2783 { 7, 0, 0 },
2784 { 8, 1, 0 },
2785};
2786
2787/**
2788 * ibmvfc_get_prli_rsp - Find PRLI response index
2789 * @flags: PRLI response flags
2790 *
2791 **/
2792static int ibmvfc_get_prli_rsp(u16 flags)
2793{
2794 int i;
2795 int code = (flags & 0x0f00) >> 8;
2796
2797 for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
2798 if (prli_rsp[i].code == code)
2799 return i;
2800
2801 return 0;
2802}
2803
2770/** 2804/**
2771 * ibmvfc_tgt_prli_done - Completion handler for Process Login 2805 * ibmvfc_tgt_prli_done - Completion handler for Process Login
2772 * @evt: ibmvfc event struct 2806 * @evt: ibmvfc event struct
@@ -2777,15 +2811,36 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2777 struct ibmvfc_target *tgt = evt->tgt; 2811 struct ibmvfc_target *tgt = evt->tgt;
2778 struct ibmvfc_host *vhost = evt->vhost; 2812 struct ibmvfc_host *vhost = evt->vhost;
2779 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; 2813 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
2814 struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
2780 u32 status = rsp->common.status; 2815 u32 status = rsp->common.status;
2816 int index;
2781 2817
2782 vhost->discovery_threads--; 2818 vhost->discovery_threads--;
2783 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 2819 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2784 switch (status) { 2820 switch (status) {
2785 case IBMVFC_MAD_SUCCESS: 2821 case IBMVFC_MAD_SUCCESS:
2786 tgt_dbg(tgt, "Process Login succeeded\n"); 2822 tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
2787 tgt->need_login = 0; 2823 parms->type, parms->flags, parms->service_parms);
2788 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT); 2824
2825 if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
2826 index = ibmvfc_get_prli_rsp(parms->flags);
2827 if (prli_rsp[index].logged_in) {
2828 if (parms->flags & IBMVFC_PRLI_EST_IMG_PAIR) {
2829 tgt->need_login = 0;
2830 tgt->ids.roles = 0;
2831 if (parms->service_parms & IBMVFC_PRLI_TARGET_FUNC)
2832 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
2833 if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC)
2834 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
2835 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT);
2836 } else
2837 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2838 } else if (prli_rsp[index].retry)
2839 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
2840 else
2841 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2842 } else
2843 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2789 break; 2844 break;
2790 case IBMVFC_MAD_DRIVER_FAILED: 2845 case IBMVFC_MAD_DRIVER_FAILED:
2791 break; 2846 break;
@@ -2874,7 +2929,6 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2874 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name); 2929 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
2875 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name); 2930 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
2876 tgt->ids.port_id = tgt->scsi_id; 2931 tgt->ids.port_id = tgt->scsi_id;
2877 tgt->ids.roles = FC_PORT_ROLE_FCP_TARGET;
2878 memcpy(&tgt->service_parms, &rsp->service_parms, 2932 memcpy(&tgt->service_parms, &rsp->service_parms,
2879 sizeof(tgt->service_parms)); 2933 sizeof(tgt->service_parms));
2880 memcpy(&tgt->service_parms_change, &rsp->service_parms_change, 2934 memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 07829009a8be..def473f0a98f 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -152,13 +152,13 @@ module_param_named(log_level, ipr_log_level, uint, 0);
152MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver"); 152MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
153module_param_named(testmode, ipr_testmode, int, 0); 153module_param_named(testmode, ipr_testmode, int, 0);
154MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations"); 154MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
155module_param_named(fastfail, ipr_fastfail, int, 0); 155module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
156MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries"); 156MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
157module_param_named(transop_timeout, ipr_transop_timeout, int, 0); 157module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
158MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)"); 158MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
159module_param_named(enable_cache, ipr_enable_cache, int, 0); 159module_param_named(enable_cache, ipr_enable_cache, int, 0);
160MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)"); 160MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
161module_param_named(debug, ipr_debug, int, 0); 161module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
162MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)"); 162MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
163module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0); 163module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
164MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)"); 164MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
@@ -354,6 +354,8 @@ struct ipr_error_table_t ipr_error_table[] = {
354 "9076: Configuration error, missing remote IOA"}, 354 "9076: Configuration error, missing remote IOA"},
355 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL, 355 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
356 "4050: Enclosure does not support a required multipath function"}, 356 "4050: Enclosure does not support a required multipath function"},
357 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
358 "4070: Logically bad block written on device"},
357 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL, 359 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
358 "9041: Array protection temporarily suspended"}, 360 "9041: Array protection temporarily suspended"},
359 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL, 361 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
@@ -7147,6 +7149,7 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
7147 7149
7148 ENTER; 7150 ENTER;
7149 free_irq(pdev->irq, ioa_cfg); 7151 free_irq(pdev->irq, ioa_cfg);
7152 pci_disable_msi(pdev);
7150 iounmap(ioa_cfg->hdw_dma_regs); 7153 iounmap(ioa_cfg->hdw_dma_regs);
7151 pci_release_regions(pdev); 7154 pci_release_regions(pdev);
7152 ipr_free_mem(ioa_cfg); 7155 ipr_free_mem(ioa_cfg);
@@ -7432,6 +7435,11 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7432 goto out; 7435 goto out;
7433 } 7436 }
7434 7437
7438 if (!(rc = pci_enable_msi(pdev)))
7439 dev_info(&pdev->dev, "MSI enabled\n");
7440 else if (ipr_debug)
7441 dev_info(&pdev->dev, "Cannot enable MSI\n");
7442
7435 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); 7443 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
7436 7444
7437 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); 7445 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
@@ -7574,6 +7582,7 @@ out_release_regions:
7574out_scsi_host_put: 7582out_scsi_host_put:
7575 scsi_host_put(host); 7583 scsi_host_put(host);
7576out_disable: 7584out_disable:
7585 pci_disable_msi(pdev);
7577 pci_disable_device(pdev); 7586 pci_disable_device(pdev);
7578 goto out; 7587 goto out;
7579} 7588}
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 8f872f816fe4..79a3ae4fb2c7 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -37,8 +37,8 @@
37/* 37/*
38 * Literals 38 * Literals
39 */ 39 */
40#define IPR_DRIVER_VERSION "2.4.1" 40#define IPR_DRIVER_VERSION "2.4.2"
41#define IPR_DRIVER_DATE "(April 24, 2007)" 41#define IPR_DRIVER_DATE "(January 21, 2009)"
42 42
43/* 43/*
44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index ef683f0d2b5a..457d76a4cfe5 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1004,8 +1004,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
1004 DEBUG_VAR(1, "(%s%d) Failing active commands", ips_name, ha->host_num); 1004 DEBUG_VAR(1, "(%s%d) Failing active commands", ips_name, ha->host_num);
1005 1005
1006 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { 1006 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
1007 scb->scsi_cmd->result = 1007 scb->scsi_cmd->result = DID_RESET << 16;
1008 (DID_RESET << 16) | (SUGGEST_RETRY << 24);
1009 scb->scsi_cmd->scsi_done(scb->scsi_cmd); 1008 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
1010 ips_freescb(ha, scb); 1009 ips_freescb(ha, scb);
1011 } 1010 }
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 23808dfe22ba..b3e5e08e44ab 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -48,13 +48,6 @@ MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
48 "Alex Aizman <itn780@yahoo.com>"); 48 "Alex Aizman <itn780@yahoo.com>");
49MODULE_DESCRIPTION("iSCSI/TCP data-path"); 49MODULE_DESCRIPTION("iSCSI/TCP data-path");
50MODULE_LICENSE("GPL"); 50MODULE_LICENSE("GPL");
51#undef DEBUG_TCP
52
53#ifdef DEBUG_TCP
54#define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
55#else
56#define debug_tcp(fmt...)
57#endif
58 51
59static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport; 52static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport;
60static struct scsi_host_template iscsi_sw_tcp_sht; 53static struct scsi_host_template iscsi_sw_tcp_sht;
@@ -63,6 +56,21 @@ static struct iscsi_transport iscsi_sw_tcp_transport;
63static unsigned int iscsi_max_lun = 512; 56static unsigned int iscsi_max_lun = 512;
64module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); 57module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
65 58
59static int iscsi_sw_tcp_dbg;
60module_param_named(debug_iscsi_tcp, iscsi_sw_tcp_dbg, int,
61 S_IRUGO | S_IWUSR);
62MODULE_PARM_DESC(debug_iscsi_tcp, "Turn on debugging for iscsi_tcp module "
63 "Set to 1 to turn on, and zero to turn off. Default is off.");
64
65#define ISCSI_SW_TCP_DBG(_conn, dbg_fmt, arg...) \
66 do { \
67 if (iscsi_sw_tcp_dbg) \
68 iscsi_conn_printk(KERN_INFO, _conn, \
69 "%s " dbg_fmt, \
70 __func__, ##arg); \
71 } while (0);
72
73
66/** 74/**
67 * iscsi_sw_tcp_recv - TCP receive in sendfile fashion 75 * iscsi_sw_tcp_recv - TCP receive in sendfile fashion
68 * @rd_desc: read descriptor 76 * @rd_desc: read descriptor
@@ -77,7 +85,7 @@ static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
77 unsigned int consumed, total_consumed = 0; 85 unsigned int consumed, total_consumed = 0;
78 int status; 86 int status;
79 87
80 debug_tcp("in %d bytes\n", skb->len - offset); 88 ISCSI_SW_TCP_DBG(conn, "in %d bytes\n", skb->len - offset);
81 89
82 do { 90 do {
83 status = 0; 91 status = 0;
@@ -86,7 +94,8 @@ static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
86 total_consumed += consumed; 94 total_consumed += consumed;
87 } while (consumed != 0 && status != ISCSI_TCP_SKB_DONE); 95 } while (consumed != 0 && status != ISCSI_TCP_SKB_DONE);
88 96
89 debug_tcp("read %d bytes status %d\n", skb->len - offset, status); 97 ISCSI_SW_TCP_DBG(conn, "read %d bytes status %d\n",
98 skb->len - offset, status);
90 return total_consumed; 99 return total_consumed;
91} 100}
92 101
@@ -131,7 +140,8 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
131 if ((sk->sk_state == TCP_CLOSE_WAIT || 140 if ((sk->sk_state == TCP_CLOSE_WAIT ||
132 sk->sk_state == TCP_CLOSE) && 141 sk->sk_state == TCP_CLOSE) &&
133 !atomic_read(&sk->sk_rmem_alloc)) { 142 !atomic_read(&sk->sk_rmem_alloc)) {
134 debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n"); 143 ISCSI_SW_TCP_DBG(conn, "iscsi_tcp_state_change: "
144 "TCP_CLOSE|TCP_CLOSE_WAIT\n");
135 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 145 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
136 } 146 }
137 147
@@ -155,8 +165,8 @@ static void iscsi_sw_tcp_write_space(struct sock *sk)
155 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; 165 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
156 166
157 tcp_sw_conn->old_write_space(sk); 167 tcp_sw_conn->old_write_space(sk);
158 debug_tcp("iscsi_write_space: cid %d\n", conn->id); 168 ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n");
159 scsi_queue_work(conn->session->host, &conn->xmitwork); 169 iscsi_conn_queue_work(conn);
160} 170}
161 171
162static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn) 172static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
@@ -283,7 +293,7 @@ static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
283 } 293 }
284 } 294 }
285 295
286 debug_tcp("xmit %d bytes\n", consumed); 296 ISCSI_SW_TCP_DBG(conn, "xmit %d bytes\n", consumed);
287 297
288 conn->txdata_octets += consumed; 298 conn->txdata_octets += consumed;
289 return consumed; 299 return consumed;
@@ -291,7 +301,7 @@ static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
291error: 301error:
292 /* Transmit error. We could initiate error recovery 302 /* Transmit error. We could initiate error recovery
293 * here. */ 303 * here. */
294 debug_tcp("Error sending PDU, errno=%d\n", rc); 304 ISCSI_SW_TCP_DBG(conn, "Error sending PDU, errno=%d\n", rc);
295 iscsi_conn_failure(conn, rc); 305 iscsi_conn_failure(conn, rc);
296 return -EIO; 306 return -EIO;
297} 307}
@@ -334,9 +344,10 @@ static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
334 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; 344 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
335 345
336 tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment; 346 tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment;
337 debug_tcp("Header done. Next segment size %u total_size %u\n", 347 ISCSI_SW_TCP_DBG(tcp_conn->iscsi_conn,
338 tcp_sw_conn->out.segment.size, 348 "Header done. Next segment size %u total_size %u\n",
339 tcp_sw_conn->out.segment.total_size); 349 tcp_sw_conn->out.segment.size,
350 tcp_sw_conn->out.segment.total_size);
340 return 0; 351 return 0;
341} 352}
342 353
@@ -346,8 +357,8 @@ static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
346 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 357 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
347 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; 358 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
348 359
349 debug_tcp("%s(%p%s)\n", __func__, tcp_conn, 360 ISCSI_SW_TCP_DBG(conn, "%s\n", conn->hdrdgst_en ?
350 conn->hdrdgst_en? ", digest enabled" : ""); 361 "digest enabled" : "digest disabled");
351 362
352 /* Clear the data segment - needs to be filled in by the 363 /* Clear the data segment - needs to be filled in by the
353 * caller using iscsi_tcp_send_data_prep() */ 364 * caller using iscsi_tcp_send_data_prep() */
@@ -389,9 +400,9 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
389 struct hash_desc *tx_hash = NULL; 400 struct hash_desc *tx_hash = NULL;
390 unsigned int hdr_spec_len; 401 unsigned int hdr_spec_len;
391 402
392 debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__, 403 ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
393 tcp_conn, offset, len, 404 conn->datadgst_en ?
394 conn->datadgst_en? ", digest enabled" : ""); 405 "digest enabled" : "digest disabled");
395 406
396 /* Make sure the datalen matches what the caller 407 /* Make sure the datalen matches what the caller
397 said he would send. */ 408 said he would send. */
@@ -415,8 +426,8 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
415 struct hash_desc *tx_hash = NULL; 426 struct hash_desc *tx_hash = NULL;
416 unsigned int hdr_spec_len; 427 unsigned int hdr_spec_len;
417 428
418 debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len, 429 ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
419 conn->datadgst_en? ", digest enabled" : ""); 430 "digest enabled" : "digest disabled");
420 431
421 /* Make sure the datalen matches what the caller 432 /* Make sure the datalen matches what the caller
422 said he would send. */ 433 said he would send. */
@@ -754,8 +765,7 @@ iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
754 765
755static struct iscsi_cls_session * 766static struct iscsi_cls_session *
756iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, 767iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
757 uint16_t qdepth, uint32_t initial_cmdsn, 768 uint16_t qdepth, uint32_t initial_cmdsn)
758 uint32_t *hostno)
759{ 769{
760 struct iscsi_cls_session *cls_session; 770 struct iscsi_cls_session *cls_session;
761 struct iscsi_session *session; 771 struct iscsi_session *session;
@@ -766,10 +776,11 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
766 return NULL; 776 return NULL;
767 } 777 }
768 778
769 shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, 0, qdepth); 779 shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, 0, 1);
770 if (!shost) 780 if (!shost)
771 return NULL; 781 return NULL;
772 shost->transportt = iscsi_sw_tcp_scsi_transport; 782 shost->transportt = iscsi_sw_tcp_scsi_transport;
783 shost->cmd_per_lun = qdepth;
773 shost->max_lun = iscsi_max_lun; 784 shost->max_lun = iscsi_max_lun;
774 shost->max_id = 0; 785 shost->max_id = 0;
775 shost->max_channel = 0; 786 shost->max_channel = 0;
@@ -777,7 +788,6 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
777 788
778 if (iscsi_host_add(shost, NULL)) 789 if (iscsi_host_add(shost, NULL))
779 goto free_host; 790 goto free_host;
780 *hostno = shost->host_no;
781 791
782 cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost, 792 cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
783 cmds_max, 793 cmds_max,
@@ -813,6 +823,12 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
813 iscsi_host_free(shost); 823 iscsi_host_free(shost);
814} 824}
815 825
826static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev)
827{
828 set_bit(QUEUE_FLAG_BIDI, &sdev->request_queue->queue_flags);
829 return 0;
830}
831
816static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev) 832static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
817{ 833{
818 blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY); 834 blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
@@ -833,6 +849,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
833 .eh_device_reset_handler= iscsi_eh_device_reset, 849 .eh_device_reset_handler= iscsi_eh_device_reset,
834 .eh_target_reset_handler= iscsi_eh_target_reset, 850 .eh_target_reset_handler= iscsi_eh_target_reset,
835 .use_clustering = DISABLE_CLUSTERING, 851 .use_clustering = DISABLE_CLUSTERING,
852 .slave_alloc = iscsi_sw_tcp_slave_alloc,
836 .slave_configure = iscsi_sw_tcp_slave_configure, 853 .slave_configure = iscsi_sw_tcp_slave_configure,
837 .proc_name = "iscsi_tcp", 854 .proc_name = "iscsi_tcp",
838 .this_id = -1, 855 .this_id = -1,
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 505825b6124d..992af05aacf1 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -281,7 +281,7 @@ static void fc_exch_release(struct fc_exch *ep)
281 ep->destructor(&ep->seq, ep->arg); 281 ep->destructor(&ep->seq, ep->arg);
282 if (ep->lp->tt.exch_put) 282 if (ep->lp->tt.exch_put)
283 ep->lp->tt.exch_put(ep->lp, mp, ep->xid); 283 ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
284 WARN_ON(!ep->esb_stat & ESB_ST_COMPLETE); 284 WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
285 mempool_free(ep, mp->ep_pool); 285 mempool_free(ep, mp->ep_pool);
286 } 286 }
287} 287}
@@ -489,7 +489,7 @@ static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp)
489 struct fc_exch *ep = NULL; 489 struct fc_exch *ep = NULL;
490 490
491 if (mp->max_read) { 491 if (mp->max_read) {
492 if (fc_frame_is_read(fp)) { 492 if (fc_fcp_is_read(fr_fsp(fp))) {
493 min = mp->min_xid; 493 min = mp->min_xid;
494 max = mp->max_read; 494 max = mp->max_read;
495 plast = &mp->last_read; 495 plast = &mp->last_read;
@@ -1841,6 +1841,8 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
1841 fc_exch_setup_hdr(ep, fp, ep->f_ctl); 1841 fc_exch_setup_hdr(ep, fp, ep->f_ctl);
1842 sp->cnt++; 1842 sp->cnt++;
1843 1843
1844 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
1845
1844 if (unlikely(lp->tt.frame_send(lp, fp))) 1846 if (unlikely(lp->tt.frame_send(lp, fp)))
1845 goto err; 1847 goto err;
1846 1848
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 2a631d7dbcec..a5725f3b7ce1 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -259,12 +259,62 @@ static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
259 } 259 }
260 260
261 fsp->state &= ~FC_SRB_ABORT_PENDING; 261 fsp->state &= ~FC_SRB_ABORT_PENDING;
262 fsp->io_status = SUGGEST_RETRY << 24; 262 fsp->io_status = 0;
263 fsp->status_code = FC_ERROR; 263 fsp->status_code = FC_ERROR;
264 fc_fcp_complete_locked(fsp); 264 fc_fcp_complete_locked(fsp);
265} 265}
266 266
267/* 267/*
268 * fc_fcp_ddp_setup - calls to LLD's ddp_setup to set up DDP
269 * transfer for a read I/O indicated by the fc_fcp_pkt.
270 * @fsp: ptr to the fc_fcp_pkt
271 *
272 * This is called in exch_seq_send() when we have a newly allocated
273 * exchange with a valid exchange id to setup ddp.
274 *
275 * returns: none
276 */
277void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
278{
279 struct fc_lport *lp;
280
281 if (!fsp)
282 return;
283
284 lp = fsp->lp;
285 if ((fsp->req_flags & FC_SRB_READ) &&
286 (lp->lro_enabled) && (lp->tt.ddp_setup)) {
287 if (lp->tt.ddp_setup(lp, xid, scsi_sglist(fsp->cmd),
288 scsi_sg_count(fsp->cmd)))
289 fsp->xfer_ddp = xid;
290 }
291}
292EXPORT_SYMBOL(fc_fcp_ddp_setup);
293
294/*
295 * fc_fcp_ddp_done - calls to LLD's ddp_done to release any
296 * DDP related resources for this I/O if it is initialized
297 * as a ddp transfer
298 * @fsp: ptr to the fc_fcp_pkt
299 *
300 * returns: none
301 */
302static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
303{
304 struct fc_lport *lp;
305
306 if (!fsp)
307 return;
308
309 lp = fsp->lp;
310 if (fsp->xfer_ddp && lp->tt.ddp_done) {
311 fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp);
312 fsp->xfer_ddp = 0;
313 }
314}
315
316
317/*
268 * Receive SCSI data from target. 318 * Receive SCSI data from target.
269 * Called after receiving solicited data. 319 * Called after receiving solicited data.
270 */ 320 */
@@ -289,6 +339,9 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
289 len = fr_len(fp) - sizeof(*fh); 339 len = fr_len(fp) - sizeof(*fh);
290 buf = fc_frame_payload_get(fp, 0); 340 buf = fc_frame_payload_get(fp, 0);
291 341
342 /* if this I/O is ddped, update xfer len */
343 fc_fcp_ddp_done(fsp);
344
292 if (offset + len > fsp->data_len) { 345 if (offset + len > fsp->data_len) {
293 /* this should never happen */ 346 /* this should never happen */
294 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && 347 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
@@ -435,7 +488,13 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
435 * burst length (t_blen) to seq_blen, otherwise set t_blen 488 * burst length (t_blen) to seq_blen, otherwise set t_blen
436 * to max FC frame payload previously set in fsp->max_payload. 489 * to max FC frame payload previously set in fsp->max_payload.
437 */ 490 */
438 t_blen = lp->seq_offload ? seq_blen : fsp->max_payload; 491 t_blen = fsp->max_payload;
492 if (lp->seq_offload) {
493 t_blen = min(seq_blen, (size_t)lp->lso_max);
494 FC_DEBUG_FCP("fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
495 fsp, seq_blen, lp->lso_max, t_blen);
496 }
497
439 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); 498 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
440 if (t_blen > 512) 499 if (t_blen > 512)
441 t_blen &= ~(512 - 1); /* round down to block size */ 500 t_blen &= ~(512 - 1); /* round down to block size */
@@ -744,6 +803,9 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
744 fsp->scsi_comp_flags = flags; 803 fsp->scsi_comp_flags = flags;
745 expected_len = fsp->data_len; 804 expected_len = fsp->data_len;
746 805
806 /* if ddp, update xfer len */
807 fc_fcp_ddp_done(fsp);
808
747 if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) { 809 if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
748 rp_ex = (void *)(fc_rp + 1); 810 rp_ex = (void *)(fc_rp + 1);
749 if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) { 811 if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
@@ -859,7 +921,7 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
859 (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) || 921 (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
860 fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) { 922 fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
861 fsp->status_code = FC_DATA_UNDRUN; 923 fsp->status_code = FC_DATA_UNDRUN;
862 fsp->io_status = SUGGEST_RETRY << 24; 924 fsp->io_status = 0;
863 } 925 }
864 } 926 }
865 927
@@ -1006,7 +1068,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
1006 } 1068 }
1007 1069
1008 memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len); 1070 memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
1009 fr_cmd(fp) = fsp->cmd; 1071 fr_fsp(fp) = fsp;
1010 rport = fsp->rport; 1072 rport = fsp->rport;
1011 fsp->max_payload = rport->maxframe_size; 1073 fsp->max_payload = rport->maxframe_size;
1012 rp = rport->dd_data; 1074 rp = rport->dd_data;
@@ -1267,7 +1329,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
1267 rp = rport->dd_data; 1329 rp = rport->dd_data;
1268 if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) { 1330 if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) {
1269 fsp->status_code = FC_HRD_ERROR; 1331 fsp->status_code = FC_HRD_ERROR;
1270 fsp->io_status = SUGGEST_RETRY << 24; 1332 fsp->io_status = 0;
1271 fc_fcp_complete_locked(fsp); 1333 fc_fcp_complete_locked(fsp);
1272 return; 1334 return;
1273 } 1335 }
@@ -1740,6 +1802,9 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1740 struct fc_lport *lp; 1802 struct fc_lport *lp;
1741 unsigned long flags; 1803 unsigned long flags;
1742 1804
1805 /* release outstanding ddp context */
1806 fc_fcp_ddp_done(fsp);
1807
1743 fsp->state |= FC_SRB_COMPL; 1808 fsp->state |= FC_SRB_COMPL;
1744 if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) { 1809 if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
1745 spin_unlock_bh(&fsp->scsi_pkt_lock); 1810 spin_unlock_bh(&fsp->scsi_pkt_lock);
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 2ae50a1188e6..7ef44501ecc6 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -762,10 +762,11 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
762 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); 762 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
763 if (remote_wwpn == lport->wwpn) { 763 if (remote_wwpn == lport->wwpn) {
764 FC_DBG("FLOGI from port with same WWPN %llx " 764 FC_DBG("FLOGI from port with same WWPN %llx "
765 "possible configuration error\n", remote_wwpn); 765 "possible configuration error\n",
766 (unsigned long long)remote_wwpn);
766 goto out; 767 goto out;
767 } 768 }
768 FC_DBG("FLOGI from port WWPN %llx\n", remote_wwpn); 769 FC_DBG("FLOGI from port WWPN %llx\n", (unsigned long long)remote_wwpn);
769 770
770 /* 771 /*
771 * XXX what is the right thing to do for FIDs? 772 * XXX what is the right thing to do for FIDs?
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index dae65133a833..0472bb73221e 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -988,7 +988,7 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport,
988 switch (rdata->rp_state) { 988 switch (rdata->rp_state) {
989 case RPORT_ST_INIT: 989 case RPORT_ST_INIT:
990 FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT " 990 FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT "
991 "- reject\n", sid, wwpn); 991 "- reject\n", sid, (unsigned long long)wwpn);
992 reject = ELS_RJT_UNSUP; 992 reject = ELS_RJT_UNSUP;
993 break; 993 break;
994 case RPORT_ST_PLOGI: 994 case RPORT_ST_PLOGI:
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 809d32d95c76..dfaa8adf099e 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -38,6 +38,28 @@
38#include <scsi/scsi_transport_iscsi.h> 38#include <scsi/scsi_transport_iscsi.h>
39#include <scsi/libiscsi.h> 39#include <scsi/libiscsi.h>
40 40
41static int iscsi_dbg_lib;
42module_param_named(debug_libiscsi, iscsi_dbg_lib, int, S_IRUGO | S_IWUSR);
43MODULE_PARM_DESC(debug_libiscsi, "Turn on debugging for libiscsi module. "
44 "Set to 1 to turn on, and zero to turn off. Default "
45 "is off.");
46
47#define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \
48 do { \
49 if (iscsi_dbg_lib) \
50 iscsi_conn_printk(KERN_INFO, _conn, \
51 "%s " dbg_fmt, \
52 __func__, ##arg); \
53 } while (0);
54
55#define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \
56 do { \
57 if (iscsi_dbg_lib) \
58 iscsi_session_printk(KERN_INFO, _session, \
59 "%s " dbg_fmt, \
60 __func__, ##arg); \
61 } while (0);
62
41/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */ 63/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
42#define SNA32_CHECK 2147483648UL 64#define SNA32_CHECK 2147483648UL
43 65
@@ -54,6 +76,15 @@ static int iscsi_sna_lte(u32 n1, u32 n2)
54 (n1 > n2 && (n2 - n1 < SNA32_CHECK))); 76 (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
55} 77}
56 78
79inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
80{
81 struct Scsi_Host *shost = conn->session->host;
82 struct iscsi_host *ihost = shost_priv(shost);
83
84 queue_work(ihost->workq, &conn->xmitwork);
85}
86EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
87
57void 88void
58iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) 89iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
59{ 90{
@@ -81,8 +112,7 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
81 if (!list_empty(&session->leadconn->xmitqueue) || 112 if (!list_empty(&session->leadconn->xmitqueue) ||
82 !list_empty(&session->leadconn->mgmtqueue)) { 113 !list_empty(&session->leadconn->mgmtqueue)) {
83 if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 114 if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
84 scsi_queue_work(session->host, 115 iscsi_conn_queue_work(session->leadconn);
85 &session->leadconn->xmitwork);
86 } 116 }
87 } 117 }
88} 118}
@@ -176,10 +206,11 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
176 ecdb_ahdr->reserved = 0; 206 ecdb_ahdr->reserved = 0;
177 memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen); 207 memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
178 208
179 debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d " 209 ISCSI_DBG_SESSION(task->conn->session,
180 "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n", 210 "iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
181 cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len); 211 "rlen %d pad_len %d ahs_length %d iscsi_headers_size "
182 212 "%u\n", cmd->cmd_len, rlen, pad_len, ahslength,
213 task->hdr_len);
183 return 0; 214 return 0;
184} 215}
185 216
@@ -201,10 +232,11 @@ static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
201 rlen_ahdr->reserved = 0; 232 rlen_ahdr->reserved = 0;
202 rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length); 233 rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
203 234
204 debug_scsi("bidi-in rlen_ahdr->read_length(%d) " 235 ISCSI_DBG_SESSION(task->conn->session,
205 "rlen_ahdr->ahslength(%d)\n", 236 "bidi-in rlen_ahdr->read_length(%d) "
206 be32_to_cpu(rlen_ahdr->read_length), 237 "rlen_ahdr->ahslength(%d)\n",
207 be16_to_cpu(rlen_ahdr->ahslength)); 238 be32_to_cpu(rlen_ahdr->read_length),
239 be16_to_cpu(rlen_ahdr->ahslength));
208 return 0; 240 return 0;
209} 241}
210 242
@@ -335,13 +367,15 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
335 list_move_tail(&task->running, &conn->run_list); 367 list_move_tail(&task->running, &conn->run_list);
336 368
337 conn->scsicmd_pdus_cnt++; 369 conn->scsicmd_pdus_cnt++;
338 debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d " 370 ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
339 "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ? 371 "itt 0x%x len %d bidi_len %d cmdsn %d win %d]\n",
340 "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ? 372 scsi_bidi_cmnd(sc) ? "bidirectional" :
341 "write" : "read", conn->id, sc, sc->cmnd[0], task->itt, 373 sc->sc_data_direction == DMA_TO_DEVICE ?
342 scsi_bufflen(sc), 374 "write" : "read", conn->id, sc, sc->cmnd[0],
343 scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0, 375 task->itt, scsi_bufflen(sc),
344 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); 376 scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
377 session->cmdsn,
378 session->max_cmdsn - session->exp_cmdsn + 1);
345 return 0; 379 return 0;
346} 380}
347 381
@@ -483,9 +517,9 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
483 517
484 task->state = ISCSI_TASK_RUNNING; 518 task->state = ISCSI_TASK_RUNNING;
485 list_move_tail(&task->running, &conn->mgmt_run_list); 519 list_move_tail(&task->running, &conn->mgmt_run_list);
486 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n", 520 ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
487 hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt, 521 "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
488 task->data_count); 522 hdr->itt, task->data_count);
489 return 0; 523 return 0;
490} 524}
491 525
@@ -560,7 +594,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
560 goto free_task; 594 goto free_task;
561 595
562 } else 596 } else
563 scsi_queue_work(conn->session->host, &conn->xmitwork); 597 iscsi_conn_queue_work(conn);
564 598
565 return task; 599 return task;
566 600
@@ -637,8 +671,9 @@ invalid_datalen:
637 671
638 memcpy(sc->sense_buffer, data + 2, 672 memcpy(sc->sense_buffer, data + 2,
639 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE)); 673 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
640 debug_scsi("copied %d bytes of sense\n", 674 ISCSI_DBG_SESSION(session, "copied %d bytes of sense\n",
641 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE)); 675 min_t(uint16_t, senselen,
676 SCSI_SENSE_BUFFERSIZE));
642 } 677 }
643 678
644 if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW | 679 if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
@@ -666,8 +701,8 @@ invalid_datalen:
666 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 701 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
667 } 702 }
668out: 703out:
669 debug_scsi("done [sc %lx res %d itt 0x%x]\n", 704 ISCSI_DBG_SESSION(session, "done [sc %p res %d itt 0x%x]\n",
670 (long)sc, sc->result, task->itt); 705 sc, sc->result, task->itt);
671 conn->scsirsp_pdus_cnt++; 706 conn->scsirsp_pdus_cnt++;
672 707
673 __iscsi_put_task(task); 708 __iscsi_put_task(task);
@@ -835,8 +870,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
835 else 870 else
836 itt = ~0U; 871 itt = ~0U;
837 872
838 debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n", 873 ISCSI_DBG_SESSION(session, "[op 0x%x cid %d itt 0x%x len %d]\n",
839 opcode, conn->id, itt, datalen); 874 opcode, conn->id, itt, datalen);
840 875
841 if (itt == ~0U) { 876 if (itt == ~0U) {
842 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); 877 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
@@ -1034,10 +1069,9 @@ struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
1034} 1069}
1035EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask); 1070EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
1036 1071
1037void iscsi_session_failure(struct iscsi_cls_session *cls_session, 1072void iscsi_session_failure(struct iscsi_session *session,
1038 enum iscsi_err err) 1073 enum iscsi_err err)
1039{ 1074{
1040 struct iscsi_session *session = cls_session->dd_data;
1041 struct iscsi_conn *conn; 1075 struct iscsi_conn *conn;
1042 struct device *dev; 1076 struct device *dev;
1043 unsigned long flags; 1077 unsigned long flags;
@@ -1095,10 +1129,10 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
1095 * Check for iSCSI window and take care of CmdSN wrap-around 1129 * Check for iSCSI window and take care of CmdSN wrap-around
1096 */ 1130 */
1097 if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) { 1131 if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
1098 debug_scsi("iSCSI CmdSN closed. ExpCmdSn %u MaxCmdSN %u " 1132 ISCSI_DBG_SESSION(session, "iSCSI CmdSN closed. ExpCmdSn "
1099 "CmdSN %u/%u\n", session->exp_cmdsn, 1133 "%u MaxCmdSN %u CmdSN %u/%u\n",
1100 session->max_cmdsn, session->cmdsn, 1134 session->exp_cmdsn, session->max_cmdsn,
1101 session->queued_cmdsn); 1135 session->cmdsn, session->queued_cmdsn);
1102 return -ENOSPC; 1136 return -ENOSPC;
1103 } 1137 }
1104 return 0; 1138 return 0;
@@ -1133,7 +1167,7 @@ void iscsi_requeue_task(struct iscsi_task *task)
1133 struct iscsi_conn *conn = task->conn; 1167 struct iscsi_conn *conn = task->conn;
1134 1168
1135 list_move_tail(&task->running, &conn->requeue); 1169 list_move_tail(&task->running, &conn->requeue);
1136 scsi_queue_work(conn->session->host, &conn->xmitwork); 1170 iscsi_conn_queue_work(conn);
1137} 1171}
1138EXPORT_SYMBOL_GPL(iscsi_requeue_task); 1172EXPORT_SYMBOL_GPL(iscsi_requeue_task);
1139 1173
@@ -1152,7 +1186,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
1152 1186
1153 spin_lock_bh(&conn->session->lock); 1187 spin_lock_bh(&conn->session->lock);
1154 if (unlikely(conn->suspend_tx)) { 1188 if (unlikely(conn->suspend_tx)) {
1155 debug_scsi("conn %d Tx suspended!\n", conn->id); 1189 ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
1156 spin_unlock_bh(&conn->session->lock); 1190 spin_unlock_bh(&conn->session->lock);
1157 return -ENODATA; 1191 return -ENODATA;
1158 } 1192 }
@@ -1386,7 +1420,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1386 goto prepd_reject; 1420 goto prepd_reject;
1387 } 1421 }
1388 } else 1422 } else
1389 scsi_queue_work(session->host, &conn->xmitwork); 1423 iscsi_conn_queue_work(conn);
1390 1424
1391 session->queued_cmdsn++; 1425 session->queued_cmdsn++;
1392 spin_unlock(&session->lock); 1426 spin_unlock(&session->lock);
@@ -1398,7 +1432,8 @@ prepd_reject:
1398 iscsi_complete_command(task); 1432 iscsi_complete_command(task);
1399reject: 1433reject:
1400 spin_unlock(&session->lock); 1434 spin_unlock(&session->lock);
1401 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason); 1435 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
1436 sc->cmnd[0], reason);
1402 spin_lock(host->host_lock); 1437 spin_lock(host->host_lock);
1403 return SCSI_MLQUEUE_TARGET_BUSY; 1438 return SCSI_MLQUEUE_TARGET_BUSY;
1404 1439
@@ -1407,7 +1442,8 @@ prepd_fault:
1407 iscsi_complete_command(task); 1442 iscsi_complete_command(task);
1408fault: 1443fault:
1409 spin_unlock(&session->lock); 1444 spin_unlock(&session->lock);
1410 debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason); 1445 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
1446 sc->cmnd[0], reason);
1411 if (!scsi_bidi_cmnd(sc)) 1447 if (!scsi_bidi_cmnd(sc))
1412 scsi_set_resid(sc, scsi_bufflen(sc)); 1448 scsi_set_resid(sc, scsi_bufflen(sc));
1413 else { 1449 else {
@@ -1422,8 +1458,6 @@ EXPORT_SYMBOL_GPL(iscsi_queuecommand);
1422 1458
1423int iscsi_change_queue_depth(struct scsi_device *sdev, int depth) 1459int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
1424{ 1460{
1425 if (depth > ISCSI_MAX_CMD_PER_LUN)
1426 depth = ISCSI_MAX_CMD_PER_LUN;
1427 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); 1461 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
1428 return sdev->queue_depth; 1462 return sdev->queue_depth;
1429} 1463}
@@ -1457,8 +1491,10 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)
1457 spin_lock_bh(&session->lock); 1491 spin_lock_bh(&session->lock);
1458 if (session->state == ISCSI_STATE_TERMINATE) { 1492 if (session->state == ISCSI_STATE_TERMINATE) {
1459failed: 1493failed:
1460 debug_scsi("failing target reset: session terminated " 1494 iscsi_session_printk(KERN_INFO, session,
1461 "[CID %d age %d]\n", conn->id, session->age); 1495 "failing target reset: Could not log "
1496 "back into target [age %d]\n",
1497 session->age);
1462 spin_unlock_bh(&session->lock); 1498 spin_unlock_bh(&session->lock);
1463 mutex_unlock(&session->eh_mutex); 1499 mutex_unlock(&session->eh_mutex);
1464 return FAILED; 1500 return FAILED;
@@ -1472,7 +1508,7 @@ failed:
1472 */ 1508 */
1473 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1509 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1474 1510
1475 debug_scsi("iscsi_eh_target_reset wait for relogin\n"); 1511 ISCSI_DBG_SESSION(session, "wait for relogin\n");
1476 wait_event_interruptible(conn->ehwait, 1512 wait_event_interruptible(conn->ehwait,
1477 session->state == ISCSI_STATE_TERMINATE || 1513 session->state == ISCSI_STATE_TERMINATE ||
1478 session->state == ISCSI_STATE_LOGGED_IN || 1514 session->state == ISCSI_STATE_LOGGED_IN ||
@@ -1501,7 +1537,7 @@ static void iscsi_tmf_timedout(unsigned long data)
1501 spin_lock(&session->lock); 1537 spin_lock(&session->lock);
1502 if (conn->tmf_state == TMF_QUEUED) { 1538 if (conn->tmf_state == TMF_QUEUED) {
1503 conn->tmf_state = TMF_TIMEDOUT; 1539 conn->tmf_state = TMF_TIMEDOUT;
1504 debug_scsi("tmf timedout\n"); 1540 ISCSI_DBG_SESSION(session, "tmf timedout\n");
1505 /* unblock eh_abort() */ 1541 /* unblock eh_abort() */
1506 wake_up(&conn->ehwait); 1542 wake_up(&conn->ehwait);
1507 } 1543 }
@@ -1521,7 +1557,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1521 spin_unlock_bh(&session->lock); 1557 spin_unlock_bh(&session->lock);
1522 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1558 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1523 spin_lock_bh(&session->lock); 1559 spin_lock_bh(&session->lock);
1524 debug_scsi("tmf exec failure\n"); 1560 ISCSI_DBG_SESSION(session, "tmf exec failure\n");
1525 return -EPERM; 1561 return -EPERM;
1526 } 1562 }
1527 conn->tmfcmd_pdus_cnt++; 1563 conn->tmfcmd_pdus_cnt++;
@@ -1529,7 +1565,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1529 conn->tmf_timer.function = iscsi_tmf_timedout; 1565 conn->tmf_timer.function = iscsi_tmf_timedout;
1530 conn->tmf_timer.data = (unsigned long)conn; 1566 conn->tmf_timer.data = (unsigned long)conn;
1531 add_timer(&conn->tmf_timer); 1567 add_timer(&conn->tmf_timer);
1532 debug_scsi("tmf set timeout\n"); 1568 ISCSI_DBG_SESSION(session, "tmf set timeout\n");
1533 1569
1534 spin_unlock_bh(&session->lock); 1570 spin_unlock_bh(&session->lock);
1535 mutex_unlock(&session->eh_mutex); 1571 mutex_unlock(&session->eh_mutex);
@@ -1567,22 +1603,27 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
1567{ 1603{
1568 struct iscsi_task *task, *tmp; 1604 struct iscsi_task *task, *tmp;
1569 1605
1570 if (conn->task && (conn->task->sc->device->lun == lun || lun == -1)) 1606 if (conn->task) {
1571 conn->task = NULL; 1607 if (lun == -1 ||
1608 (conn->task->sc && conn->task->sc->device->lun == lun))
1609 conn->task = NULL;
1610 }
1572 1611
1573 /* flush pending */ 1612 /* flush pending */
1574 list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) { 1613 list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
1575 if (lun == task->sc->device->lun || lun == -1) { 1614 if (lun == task->sc->device->lun || lun == -1) {
1576 debug_scsi("failing pending sc %p itt 0x%x\n", 1615 ISCSI_DBG_SESSION(conn->session,
1577 task->sc, task->itt); 1616 "failing pending sc %p itt 0x%x\n",
1617 task->sc, task->itt);
1578 fail_command(conn, task, error << 16); 1618 fail_command(conn, task, error << 16);
1579 } 1619 }
1580 } 1620 }
1581 1621
1582 list_for_each_entry_safe(task, tmp, &conn->requeue, running) { 1622 list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
1583 if (lun == task->sc->device->lun || lun == -1) { 1623 if (lun == task->sc->device->lun || lun == -1) {
1584 debug_scsi("failing requeued sc %p itt 0x%x\n", 1624 ISCSI_DBG_SESSION(conn->session,
1585 task->sc, task->itt); 1625 "failing requeued sc %p itt 0x%x\n",
1626 task->sc, task->itt);
1586 fail_command(conn, task, error << 16); 1627 fail_command(conn, task, error << 16);
1587 } 1628 }
1588 } 1629 }
@@ -1590,8 +1631,9 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
1590 /* fail all other running */ 1631 /* fail all other running */
1591 list_for_each_entry_safe(task, tmp, &conn->run_list, running) { 1632 list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
1592 if (lun == task->sc->device->lun || lun == -1) { 1633 if (lun == task->sc->device->lun || lun == -1) {
1593 debug_scsi("failing in progress sc %p itt 0x%x\n", 1634 ISCSI_DBG_SESSION(conn->session,
1594 task->sc, task->itt); 1635 "failing in progress sc %p itt 0x%x\n",
1636 task->sc, task->itt);
1595 fail_command(conn, task, error << 16); 1637 fail_command(conn, task, error << 16);
1596 } 1638 }
1597 } 1639 }
@@ -1599,9 +1641,12 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
1599 1641
1600void iscsi_suspend_tx(struct iscsi_conn *conn) 1642void iscsi_suspend_tx(struct iscsi_conn *conn)
1601{ 1643{
1644 struct Scsi_Host *shost = conn->session->host;
1645 struct iscsi_host *ihost = shost_priv(shost);
1646
1602 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1647 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1603 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 1648 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
1604 scsi_flush_work(conn->session->host); 1649 flush_workqueue(ihost->workq);
1605} 1650}
1606EXPORT_SYMBOL_GPL(iscsi_suspend_tx); 1651EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
1607 1652
@@ -1609,7 +1654,7 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
1609{ 1654{
1610 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1655 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1611 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 1656 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
1612 scsi_queue_work(conn->session->host, &conn->xmitwork); 1657 iscsi_conn_queue_work(conn);
1613} 1658}
1614 1659
1615static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) 1660static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
@@ -1622,7 +1667,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1622 cls_session = starget_to_session(scsi_target(scmd->device)); 1667 cls_session = starget_to_session(scsi_target(scmd->device));
1623 session = cls_session->dd_data; 1668 session = cls_session->dd_data;
1624 1669
1625 debug_scsi("scsi cmd %p timedout\n", scmd); 1670 ISCSI_DBG_SESSION(session, "scsi cmd %p timedout\n", scmd);
1626 1671
1627 spin_lock(&session->lock); 1672 spin_lock(&session->lock);
1628 if (session->state != ISCSI_STATE_LOGGED_IN) { 1673 if (session->state != ISCSI_STATE_LOGGED_IN) {
@@ -1662,8 +1707,8 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1662 rc = BLK_EH_RESET_TIMER; 1707 rc = BLK_EH_RESET_TIMER;
1663done: 1708done:
1664 spin_unlock(&session->lock); 1709 spin_unlock(&session->lock);
1665 debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ? 1710 ISCSI_DBG_SESSION(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
1666 "timer reset" : "nh"); 1711 "timer reset" : "nh");
1667 return rc; 1712 return rc;
1668} 1713}
1669 1714
@@ -1697,13 +1742,13 @@ static void iscsi_check_transport_timeouts(unsigned long data)
1697 1742
1698 if (time_before_eq(last_recv + recv_timeout, jiffies)) { 1743 if (time_before_eq(last_recv + recv_timeout, jiffies)) {
1699 /* send a ping to try to provoke some traffic */ 1744 /* send a ping to try to provoke some traffic */
1700 debug_scsi("Sending nopout as ping on conn %p\n", conn); 1745 ISCSI_DBG_CONN(conn, "Sending nopout as ping\n");
1701 iscsi_send_nopout(conn, NULL); 1746 iscsi_send_nopout(conn, NULL);
1702 next_timeout = conn->last_ping + (conn->ping_timeout * HZ); 1747 next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
1703 } else 1748 } else
1704 next_timeout = last_recv + recv_timeout; 1749 next_timeout = last_recv + recv_timeout;
1705 1750
1706 debug_scsi("Setting next tmo %lu\n", next_timeout); 1751 ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout);
1707 mod_timer(&conn->transport_timer, next_timeout); 1752 mod_timer(&conn->transport_timer, next_timeout);
1708done: 1753done:
1709 spin_unlock(&session->lock); 1754 spin_unlock(&session->lock);
@@ -1740,7 +1785,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1740 * got the command. 1785 * got the command.
1741 */ 1786 */
1742 if (!sc->SCp.ptr) { 1787 if (!sc->SCp.ptr) {
1743 debug_scsi("sc never reached iscsi layer or it completed.\n"); 1788 ISCSI_DBG_SESSION(session, "sc never reached iscsi layer or "
1789 "it completed.\n");
1744 spin_unlock_bh(&session->lock); 1790 spin_unlock_bh(&session->lock);
1745 mutex_unlock(&session->eh_mutex); 1791 mutex_unlock(&session->eh_mutex);
1746 return SUCCESS; 1792 return SUCCESS;
@@ -1762,11 +1808,13 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1762 age = session->age; 1808 age = session->age;
1763 1809
1764 task = (struct iscsi_task *)sc->SCp.ptr; 1810 task = (struct iscsi_task *)sc->SCp.ptr;
1765 debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt); 1811 ISCSI_DBG_SESSION(session, "aborting [sc %p itt 0x%x]\n",
1812 sc, task->itt);
1766 1813
1767 /* task completed before time out */ 1814 /* task completed before time out */
1768 if (!task->sc) { 1815 if (!task->sc) {
1769 debug_scsi("sc completed while abort in progress\n"); 1816 ISCSI_DBG_SESSION(session, "sc completed while abort in "
1817 "progress\n");
1770 goto success; 1818 goto success;
1771 } 1819 }
1772 1820
@@ -1815,7 +1863,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1815 if (!sc->SCp.ptr) { 1863 if (!sc->SCp.ptr) {
1816 conn->tmf_state = TMF_INITIAL; 1864 conn->tmf_state = TMF_INITIAL;
1817 /* task completed before tmf abort response */ 1865 /* task completed before tmf abort response */
1818 debug_scsi("sc completed while abort in progress\n"); 1866 ISCSI_DBG_SESSION(session, "sc completed while abort "
1867 "in progress\n");
1819 goto success; 1868 goto success;
1820 } 1869 }
1821 /* fall through */ 1870 /* fall through */
@@ -1827,15 +1876,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1827success: 1876success:
1828 spin_unlock_bh(&session->lock); 1877 spin_unlock_bh(&session->lock);
1829success_unlocked: 1878success_unlocked:
1830 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt); 1879 ISCSI_DBG_SESSION(session, "abort success [sc %p itt 0x%x]\n",
1880 sc, task->itt);
1831 mutex_unlock(&session->eh_mutex); 1881 mutex_unlock(&session->eh_mutex);
1832 return SUCCESS; 1882 return SUCCESS;
1833 1883
1834failed: 1884failed:
1835 spin_unlock_bh(&session->lock); 1885 spin_unlock_bh(&session->lock);
1836failed_unlocked: 1886failed_unlocked:
1837 debug_scsi("abort failed [sc %p itt 0x%x]\n", sc, 1887 ISCSI_DBG_SESSION(session, "abort failed [sc %p itt 0x%x]\n", sc,
1838 task ? task->itt : 0); 1888 task ? task->itt : 0);
1839 mutex_unlock(&session->eh_mutex); 1889 mutex_unlock(&session->eh_mutex);
1840 return FAILED; 1890 return FAILED;
1841} 1891}
@@ -1862,7 +1912,8 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1862 cls_session = starget_to_session(scsi_target(sc->device)); 1912 cls_session = starget_to_session(scsi_target(sc->device));
1863 session = cls_session->dd_data; 1913 session = cls_session->dd_data;
1864 1914
1865 debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun); 1915 ISCSI_DBG_SESSION(session, "LU Reset [sc %p lun %u]\n",
1916 sc, sc->device->lun);
1866 1917
1867 mutex_lock(&session->eh_mutex); 1918 mutex_lock(&session->eh_mutex);
1868 spin_lock_bh(&session->lock); 1919 spin_lock_bh(&session->lock);
@@ -1916,8 +1967,8 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1916unlock: 1967unlock:
1917 spin_unlock_bh(&session->lock); 1968 spin_unlock_bh(&session->lock);
1918done: 1969done:
1919 debug_scsi("iscsi_eh_device_reset %s\n", 1970 ISCSI_DBG_SESSION(session, "dev reset result = %s\n",
1920 rc == SUCCESS ? "SUCCESS" : "FAILED"); 1971 rc == SUCCESS ? "SUCCESS" : "FAILED");
1921 mutex_unlock(&session->eh_mutex); 1972 mutex_unlock(&session->eh_mutex);
1922 return rc; 1973 return rc;
1923} 1974}
@@ -1944,7 +1995,7 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
1944 num_arrays++; 1995 num_arrays++;
1945 q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL); 1996 q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
1946 if (q->pool == NULL) 1997 if (q->pool == NULL)
1947 goto enomem; 1998 return -ENOMEM;
1948 1999
1949 q->queue = kfifo_init((void*)q->pool, max * sizeof(void*), 2000 q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
1950 GFP_KERNEL, NULL); 2001 GFP_KERNEL, NULL);
@@ -1979,8 +2030,7 @@ void iscsi_pool_free(struct iscsi_pool *q)
1979 2030
1980 for (i = 0; i < q->max; i++) 2031 for (i = 0; i < q->max; i++)
1981 kfree(q->pool[i]); 2032 kfree(q->pool[i]);
1982 if (q->pool) 2033 kfree(q->pool);
1983 kfree(q->pool);
1984 kfree(q->queue); 2034 kfree(q->queue);
1985} 2035}
1986EXPORT_SYMBOL_GPL(iscsi_pool_free); 2036EXPORT_SYMBOL_GPL(iscsi_pool_free);
@@ -1998,6 +2048,9 @@ int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
1998 if (!shost->can_queue) 2048 if (!shost->can_queue)
1999 shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX; 2049 shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
2000 2050
2051 if (!shost->cmd_per_lun)
2052 shost->cmd_per_lun = ISCSI_DEF_CMD_PER_LUN;
2053
2001 if (!shost->transportt->eh_timed_out) 2054 if (!shost->transportt->eh_timed_out)
2002 shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out; 2055 shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
2003 return scsi_add_host(shost, pdev); 2056 return scsi_add_host(shost, pdev);
@@ -2008,13 +2061,13 @@ EXPORT_SYMBOL_GPL(iscsi_host_add);
2008 * iscsi_host_alloc - allocate a host and driver data 2061 * iscsi_host_alloc - allocate a host and driver data
2009 * @sht: scsi host template 2062 * @sht: scsi host template
2010 * @dd_data_size: driver host data size 2063 * @dd_data_size: driver host data size
2011 * @qdepth: default device queue depth 2064 * @xmit_can_sleep: bool indicating if LLD will queue IO from a work queue
2012 * 2065 *
2013 * This should be called by partial offload and software iscsi drivers. 2066 * This should be called by partial offload and software iscsi drivers.
2014 * To access the driver specific memory use the iscsi_host_priv() macro. 2067 * To access the driver specific memory use the iscsi_host_priv() macro.
2015 */ 2068 */
2016struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht, 2069struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
2017 int dd_data_size, uint16_t qdepth) 2070 int dd_data_size, bool xmit_can_sleep)
2018{ 2071{
2019 struct Scsi_Host *shost; 2072 struct Scsi_Host *shost;
2020 struct iscsi_host *ihost; 2073 struct iscsi_host *ihost;
@@ -2022,28 +2075,31 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
2022 shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size); 2075 shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
2023 if (!shost) 2076 if (!shost)
2024 return NULL; 2077 return NULL;
2078 ihost = shost_priv(shost);
2025 2079
2026 if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) { 2080 if (xmit_can_sleep) {
2027 if (qdepth != 0) 2081 snprintf(ihost->workq_name, sizeof(ihost->workq_name),
2028 printk(KERN_ERR "iscsi: invalid queue depth of %d. " 2082 "iscsi_q_%d", shost->host_no);
2029 "Queue depth must be between 1 and %d.\n", 2083 ihost->workq = create_singlethread_workqueue(ihost->workq_name);
2030 qdepth, ISCSI_MAX_CMD_PER_LUN); 2084 if (!ihost->workq)
2031 qdepth = ISCSI_DEF_CMD_PER_LUN; 2085 goto free_host;
2032 } 2086 }
2033 shost->cmd_per_lun = qdepth;
2034 2087
2035 ihost = shost_priv(shost);
2036 spin_lock_init(&ihost->lock); 2088 spin_lock_init(&ihost->lock);
2037 ihost->state = ISCSI_HOST_SETUP; 2089 ihost->state = ISCSI_HOST_SETUP;
2038 ihost->num_sessions = 0; 2090 ihost->num_sessions = 0;
2039 init_waitqueue_head(&ihost->session_removal_wq); 2091 init_waitqueue_head(&ihost->session_removal_wq);
2040 return shost; 2092 return shost;
2093
2094free_host:
2095 scsi_host_put(shost);
2096 return NULL;
2041} 2097}
2042EXPORT_SYMBOL_GPL(iscsi_host_alloc); 2098EXPORT_SYMBOL_GPL(iscsi_host_alloc);
2043 2099
2044static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session) 2100static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
2045{ 2101{
2046 iscsi_session_failure(cls_session, ISCSI_ERR_INVALID_HOST); 2102 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_INVALID_HOST);
2047} 2103}
2048 2104
2049/** 2105/**
@@ -2069,6 +2125,8 @@ void iscsi_host_remove(struct Scsi_Host *shost)
2069 flush_signals(current); 2125 flush_signals(current);
2070 2126
2071 scsi_remove_host(shost); 2127 scsi_remove_host(shost);
2128 if (ihost->workq)
2129 destroy_workqueue(ihost->workq);
2072} 2130}
2073EXPORT_SYMBOL_GPL(iscsi_host_remove); 2131EXPORT_SYMBOL_GPL(iscsi_host_remove);
2074 2132
@@ -2467,14 +2525,16 @@ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
2467 2525
2468 /* handle pending */ 2526 /* handle pending */
2469 list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) { 2527 list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
2470 debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt); 2528 ISCSI_DBG_SESSION(session, "flushing pending mgmt task "
2529 "itt 0x%x\n", task->itt);
2471 /* release ref from prep task */ 2530 /* release ref from prep task */
2472 __iscsi_put_task(task); 2531 __iscsi_put_task(task);
2473 } 2532 }
2474 2533
2475 /* handle running */ 2534 /* handle running */
2476 list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) { 2535 list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
2477 debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt); 2536 ISCSI_DBG_SESSION(session, "flushing running mgmt task "
2537 "itt 0x%x\n", task->itt);
2478 /* release ref from prep task */ 2538 /* release ref from prep task */
2479 __iscsi_put_task(task); 2539 __iscsi_put_task(task);
2480 } 2540 }
@@ -2524,7 +2584,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2524 conn->datadgst_en = 0; 2584 conn->datadgst_en = 0;
2525 if (session->state == ISCSI_STATE_IN_RECOVERY && 2585 if (session->state == ISCSI_STATE_IN_RECOVERY &&
2526 old_stop_stage != STOP_CONN_RECOVER) { 2586 old_stop_stage != STOP_CONN_RECOVER) {
2527 debug_scsi("blocking session\n"); 2587 ISCSI_DBG_SESSION(session, "blocking session\n");
2528 iscsi_block_session(session->cls_session); 2588 iscsi_block_session(session->cls_session);
2529 } 2589 }
2530 } 2590 }
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index e7705d3532c9..91f8ce4d8d08 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -49,13 +49,21 @@ MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
49 "Alex Aizman <itn780@yahoo.com>"); 49 "Alex Aizman <itn780@yahoo.com>");
50MODULE_DESCRIPTION("iSCSI/TCP data-path"); 50MODULE_DESCRIPTION("iSCSI/TCP data-path");
51MODULE_LICENSE("GPL"); 51MODULE_LICENSE("GPL");
52#undef DEBUG_TCP
53 52
54#ifdef DEBUG_TCP 53static int iscsi_dbg_libtcp;
55#define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt) 54module_param_named(debug_libiscsi_tcp, iscsi_dbg_libtcp, int,
56#else 55 S_IRUGO | S_IWUSR);
57#define debug_tcp(fmt...) 56MODULE_PARM_DESC(debug_libiscsi_tcp, "Turn on debugging for libiscsi_tcp "
58#endif 57 "module. Set to 1 to turn on, and zero to turn off. Default "
58 "is off.");
59
60#define ISCSI_DBG_TCP(_conn, dbg_fmt, arg...) \
61 do { \
62 if (iscsi_dbg_libtcp) \
63 iscsi_conn_printk(KERN_INFO, _conn, \
64 "%s " dbg_fmt, \
65 __func__, ##arg); \
66 } while (0);
59 67
60static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, 68static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
61 struct iscsi_segment *segment); 69 struct iscsi_segment *segment);
@@ -123,18 +131,13 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
123 if (page_count(sg_page(sg)) >= 1 && !recv) 131 if (page_count(sg_page(sg)) >= 1 && !recv)
124 return; 132 return;
125 133
126 debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
127 segment);
128 segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); 134 segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
129 segment->data = segment->sg_mapped + sg->offset + segment->sg_offset; 135 segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
130} 136}
131 137
132void iscsi_tcp_segment_unmap(struct iscsi_segment *segment) 138void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
133{ 139{
134 debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
135
136 if (segment->sg_mapped) { 140 if (segment->sg_mapped) {
137 debug_tcp("iscsi_tcp_segment_unmap valid\n");
138 kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0); 141 kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
139 segment->sg_mapped = NULL; 142 segment->sg_mapped = NULL;
140 segment->data = NULL; 143 segment->data = NULL;
@@ -180,8 +183,9 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
180 struct scatterlist sg; 183 struct scatterlist sg;
181 unsigned int pad; 184 unsigned int pad;
182 185
183 debug_tcp("copied %u %u size %u %s\n", segment->copied, copied, 186 ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copied %u %u size %u %s\n",
184 segment->size, recv ? "recv" : "xmit"); 187 segment->copied, copied, segment->size,
188 recv ? "recv" : "xmit");
185 if (segment->hash && copied) { 189 if (segment->hash && copied) {
186 /* 190 /*
187 * If a segment is kmapd we must unmap it before sending 191 * If a segment is kmapd we must unmap it before sending
@@ -214,8 +218,8 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
214 iscsi_tcp_segment_unmap(segment); 218 iscsi_tcp_segment_unmap(segment);
215 219
216 /* Do we have more scatterlist entries? */ 220 /* Do we have more scatterlist entries? */
217 debug_tcp("total copied %u total size %u\n", segment->total_copied, 221 ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "total copied %u total size %u\n",
218 segment->total_size); 222 segment->total_copied, segment->total_size);
219 if (segment->total_copied < segment->total_size) { 223 if (segment->total_copied < segment->total_size) {
220 /* Proceed to the next entry in the scatterlist. */ 224 /* Proceed to the next entry in the scatterlist. */
221 iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg), 225 iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
@@ -229,7 +233,8 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
229 if (!(tcp_conn->iscsi_conn->session->tt->caps & CAP_PADDING_OFFLOAD)) { 233 if (!(tcp_conn->iscsi_conn->session->tt->caps & CAP_PADDING_OFFLOAD)) {
230 pad = iscsi_padding(segment->total_copied); 234 pad = iscsi_padding(segment->total_copied);
231 if (pad != 0) { 235 if (pad != 0) {
232 debug_tcp("consume %d pad bytes\n", pad); 236 ISCSI_DBG_TCP(tcp_conn->iscsi_conn,
237 "consume %d pad bytes\n", pad);
233 segment->total_size += pad; 238 segment->total_size += pad;
234 segment->size = pad; 239 segment->size = pad;
235 segment->data = segment->padbuf; 240 segment->data = segment->padbuf;
@@ -278,13 +283,13 @@ iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
278 283
279 while (!iscsi_tcp_segment_done(tcp_conn, segment, 1, copy)) { 284 while (!iscsi_tcp_segment_done(tcp_conn, segment, 1, copy)) {
280 if (copied == len) { 285 if (copied == len) {
281 debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n", 286 ISCSI_DBG_TCP(tcp_conn->iscsi_conn,
282 len); 287 "copied %d bytes\n", len);
283 break; 288 break;
284 } 289 }
285 290
286 copy = min(len - copied, segment->size - segment->copied); 291 copy = min(len - copied, segment->size - segment->copied);
287 debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy); 292 ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copying %d\n", copy);
288 memcpy(segment->data + segment->copied, ptr + copied, copy); 293 memcpy(segment->data + segment->copied, ptr + copied, copy);
289 copied += copy; 294 copied += copy;
290 } 295 }
@@ -311,7 +316,7 @@ iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
311 316
312 if (memcmp(segment->recv_digest, segment->digest, 317 if (memcmp(segment->recv_digest, segment->digest,
313 segment->digest_len)) { 318 segment->digest_len)) {
314 debug_scsi("digest mismatch\n"); 319 ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "digest mismatch\n");
315 return 0; 320 return 0;
316 } 321 }
317 322
@@ -355,12 +360,8 @@ iscsi_segment_seek_sg(struct iscsi_segment *segment,
355 struct scatterlist *sg; 360 struct scatterlist *sg;
356 unsigned int i; 361 unsigned int i;
357 362
358 debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
359 offset, size);
360 __iscsi_segment_init(segment, size, done, hash); 363 __iscsi_segment_init(segment, size, done, hash);
361 for_each_sg(sg_list, sg, sg_count, i) { 364 for_each_sg(sg_list, sg, sg_count, i) {
362 debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
363 sg->offset);
364 if (offset < sg->length) { 365 if (offset < sg->length) {
365 iscsi_tcp_segment_init_sg(segment, sg, offset); 366 iscsi_tcp_segment_init_sg(segment, sg, offset);
366 return 0; 367 return 0;
@@ -382,8 +383,9 @@ EXPORT_SYMBOL_GPL(iscsi_segment_seek_sg);
382 */ 383 */
383void iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn) 384void iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
384{ 385{
385 debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn, 386 ISCSI_DBG_TCP(tcp_conn->iscsi_conn,
386 tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : ""); 387 "(%s)\n", tcp_conn->iscsi_conn->hdrdgst_en ?
388 "digest enabled" : "digest disabled");
387 iscsi_segment_init_linear(&tcp_conn->in.segment, 389 iscsi_segment_init_linear(&tcp_conn->in.segment,
388 tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr), 390 tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
389 iscsi_tcp_hdr_recv_done, NULL); 391 iscsi_tcp_hdr_recv_done, NULL);
@@ -446,7 +448,7 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
446 while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) { 448 while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
447 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, 449 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
448 sizeof(void*)); 450 sizeof(void*));
449 debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n"); 451 ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n");
450 } 452 }
451 453
452 r2t = tcp_task->r2t; 454 r2t = tcp_task->r2t;
@@ -476,8 +478,8 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
476 return 0; 478 return 0;
477 479
478 if (tcp_task->exp_datasn != datasn) { 480 if (tcp_task->exp_datasn != datasn) {
479 debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n", 481 ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->datasn(%d)"
480 __func__, tcp_task->exp_datasn, datasn); 482 "\n", tcp_task->exp_datasn, datasn);
481 return ISCSI_ERR_DATASN; 483 return ISCSI_ERR_DATASN;
482 } 484 }
483 485
@@ -485,9 +487,9 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
485 487
486 tcp_task->data_offset = be32_to_cpu(rhdr->offset); 488 tcp_task->data_offset = be32_to_cpu(rhdr->offset);
487 if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) { 489 if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
488 debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n", 490 ISCSI_DBG_TCP(conn, "data_offset(%d) + data_len(%d) > "
489 __func__, tcp_task->data_offset, 491 "total_length_in(%d)\n", tcp_task->data_offset,
490 tcp_conn->in.datalen, total_in_length); 492 tcp_conn->in.datalen, total_in_length);
491 return ISCSI_ERR_DATA_OFFSET; 493 return ISCSI_ERR_DATA_OFFSET;
492 } 494 }
493 495
@@ -518,8 +520,8 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
518 } 520 }
519 521
520 if (tcp_task->exp_datasn != r2tsn){ 522 if (tcp_task->exp_datasn != r2tsn){
521 debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n", 523 ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
522 __func__, tcp_task->exp_datasn, r2tsn); 524 tcp_task->exp_datasn, r2tsn);
523 return ISCSI_ERR_R2TSN; 525 return ISCSI_ERR_R2TSN;
524 } 526 }
525 527
@@ -552,9 +554,9 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
552 } 554 }
553 555
554 if (r2t->data_length > session->max_burst) 556 if (r2t->data_length > session->max_burst)
555 debug_scsi("invalid R2T with data len %u and max burst %u." 557 ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max "
556 "Attempting to execute request.\n", 558 "burst %u. Attempting to execute request.\n",
557 r2t->data_length, session->max_burst); 559 r2t->data_length, session->max_burst);
558 560
559 r2t->data_offset = be32_to_cpu(rhdr->data_offset); 561 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
560 if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) { 562 if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
@@ -641,8 +643,8 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
641 if (rc) 643 if (rc)
642 return rc; 644 return rc;
643 645
644 debug_tcp("opcode 0x%x ahslen %d datalen %d\n", 646 ISCSI_DBG_TCP(conn, "opcode 0x%x ahslen %d datalen %d\n",
645 opcode, ahslen, tcp_conn->in.datalen); 647 opcode, ahslen, tcp_conn->in.datalen);
646 648
647 switch(opcode) { 649 switch(opcode) {
648 case ISCSI_OP_SCSI_DATA_IN: 650 case ISCSI_OP_SCSI_DATA_IN:
@@ -674,10 +676,10 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
674 !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) 676 !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
675 rx_hash = tcp_conn->rx_hash; 677 rx_hash = tcp_conn->rx_hash;
676 678
677 debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, " 679 ISCSI_DBG_TCP(conn, "iscsi_tcp_begin_data_in( "
678 "datalen=%d)\n", tcp_conn, 680 "offset=%d, datalen=%d)\n",
679 tcp_task->data_offset, 681 tcp_task->data_offset,
680 tcp_conn->in.datalen); 682 tcp_conn->in.datalen);
681 rc = iscsi_segment_seek_sg(&tcp_conn->in.segment, 683 rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
682 sdb->table.sgl, 684 sdb->table.sgl,
683 sdb->table.nents, 685 sdb->table.nents,
@@ -854,10 +856,10 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
854 unsigned int consumed = 0; 856 unsigned int consumed = 0;
855 int rc = 0; 857 int rc = 0;
856 858
857 debug_tcp("in %d bytes\n", skb->len - offset); 859 ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset);
858 860
859 if (unlikely(conn->suspend_rx)) { 861 if (unlikely(conn->suspend_rx)) {
860 debug_tcp("conn %d Rx suspended!\n", conn->id); 862 ISCSI_DBG_TCP(conn, "Rx suspended!\n");
861 *status = ISCSI_TCP_SUSPENDED; 863 *status = ISCSI_TCP_SUSPENDED;
862 return 0; 864 return 0;
863 } 865 }
@@ -874,15 +876,16 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
874 876
875 avail = skb_seq_read(consumed, &ptr, &seq); 877 avail = skb_seq_read(consumed, &ptr, &seq);
876 if (avail == 0) { 878 if (avail == 0) {
877 debug_tcp("no more data avail. Consumed %d\n", 879 ISCSI_DBG_TCP(conn, "no more data avail. Consumed %d\n",
878 consumed); 880 consumed);
879 *status = ISCSI_TCP_SKB_DONE; 881 *status = ISCSI_TCP_SKB_DONE;
880 skb_abort_seq_read(&seq); 882 skb_abort_seq_read(&seq);
881 goto skb_done; 883 goto skb_done;
882 } 884 }
883 BUG_ON(segment->copied >= segment->size); 885 BUG_ON(segment->copied >= segment->size);
884 886
885 debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail); 887 ISCSI_DBG_TCP(conn, "skb %p ptr=%p avail=%u\n", skb, ptr,
888 avail);
886 rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail); 889 rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
887 BUG_ON(rc == 0); 890 BUG_ON(rc == 0);
888 consumed += rc; 891 consumed += rc;
@@ -895,11 +898,11 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
895 898
896segment_done: 899segment_done:
897 *status = ISCSI_TCP_SEGMENT_DONE; 900 *status = ISCSI_TCP_SEGMENT_DONE;
898 debug_tcp("segment done\n"); 901 ISCSI_DBG_TCP(conn, "segment done\n");
899 rc = segment->done(tcp_conn, segment); 902 rc = segment->done(tcp_conn, segment);
900 if (rc != 0) { 903 if (rc != 0) {
901 *status = ISCSI_TCP_CONN_ERR; 904 *status = ISCSI_TCP_CONN_ERR;
902 debug_tcp("Error receiving PDU, errno=%d\n", rc); 905 ISCSI_DBG_TCP(conn, "Error receiving PDU, errno=%d\n", rc);
903 iscsi_conn_failure(conn, rc); 906 iscsi_conn_failure(conn, rc);
904 return 0; 907 return 0;
905 } 908 }
@@ -929,8 +932,7 @@ int iscsi_tcp_task_init(struct iscsi_task *task)
929 * mgmt tasks do not have a scatterlist since they come 932 * mgmt tasks do not have a scatterlist since they come
930 * in from the iscsi interface. 933 * in from the iscsi interface.
931 */ 934 */
932 debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, 935 ISCSI_DBG_TCP(conn, "mtask deq [itt 0x%x]\n", task->itt);
933 task->itt);
934 936
935 return conn->session->tt->init_pdu(task, 0, task->data_count); 937 return conn->session->tt->init_pdu(task, 0, task->data_count);
936 } 938 }
@@ -939,9 +941,8 @@ int iscsi_tcp_task_init(struct iscsi_task *task)
939 tcp_task->exp_datasn = 0; 941 tcp_task->exp_datasn = 0;
940 942
941 /* Prepare PDU, optionally w/ immediate data */ 943 /* Prepare PDU, optionally w/ immediate data */
942 debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n", 944 ISCSI_DBG_TCP(conn, "task deq [itt 0x%x imm %d unsol %d]\n",
943 conn->id, task->itt, task->imm_count, 945 task->itt, task->imm_count, task->unsol_r2t.data_length);
944 task->unsol_r2t.data_length);
945 946
946 err = conn->session->tt->init_pdu(task, 0, task->imm_count); 947 err = conn->session->tt->init_pdu(task, 0, task->imm_count);
947 if (err) 948 if (err)
@@ -965,7 +966,8 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
965 r2t = tcp_task->r2t; 966 r2t = tcp_task->r2t;
966 /* Continue with this R2T? */ 967 /* Continue with this R2T? */
967 if (r2t->data_length <= r2t->sent) { 968 if (r2t->data_length <= r2t->sent) {
968 debug_scsi(" done with r2t %p\n", r2t); 969 ISCSI_DBG_TCP(task->conn,
970 " done with r2t %p\n", r2t);
969 __kfifo_put(tcp_task->r2tpool.queue, 971 __kfifo_put(tcp_task->r2tpool.queue,
970 (void *)&tcp_task->r2t, 972 (void *)&tcp_task->r2t,
971 sizeof(void *)); 973 sizeof(void *));
@@ -1019,7 +1021,7 @@ flush:
1019 r2t = iscsi_tcp_get_curr_r2t(task); 1021 r2t = iscsi_tcp_get_curr_r2t(task);
1020 if (r2t == NULL) { 1022 if (r2t == NULL) {
1021 /* Waiting for more R2Ts to arrive. */ 1023 /* Waiting for more R2Ts to arrive. */
1022 debug_tcp("no R2Ts yet\n"); 1024 ISCSI_DBG_TCP(conn, "no R2Ts yet\n");
1023 return 0; 1025 return 0;
1024 } 1026 }
1025 1027
@@ -1028,9 +1030,9 @@ flush:
1028 return rc; 1030 return rc;
1029 iscsi_prep_data_out_pdu(task, r2t, (struct iscsi_data *) task->hdr); 1031 iscsi_prep_data_out_pdu(task, r2t, (struct iscsi_data *) task->hdr);
1030 1032
1031 debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n", 1033 ISCSI_DBG_TCP(conn, "sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
1032 r2t, r2t->datasn - 1, task->hdr->itt, 1034 r2t, r2t->datasn - 1, task->hdr->itt,
1033 r2t->data_offset + r2t->sent, r2t->data_count); 1035 r2t->data_offset + r2t->sent, r2t->data_count);
1034 1036
1035 rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent, 1037 rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent,
1036 r2t->data_count); 1038 r2t->data_count);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index b615eda361d5..81cdcf46c471 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1132,7 +1132,7 @@ lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
1132} 1132}
1133 1133
1134#undef lpfc_debugfs_op_disc_trc 1134#undef lpfc_debugfs_op_disc_trc
1135static struct file_operations lpfc_debugfs_op_disc_trc = { 1135static const struct file_operations lpfc_debugfs_op_disc_trc = {
1136 .owner = THIS_MODULE, 1136 .owner = THIS_MODULE,
1137 .open = lpfc_debugfs_disc_trc_open, 1137 .open = lpfc_debugfs_disc_trc_open,
1138 .llseek = lpfc_debugfs_lseek, 1138 .llseek = lpfc_debugfs_lseek,
@@ -1141,7 +1141,7 @@ static struct file_operations lpfc_debugfs_op_disc_trc = {
1141}; 1141};
1142 1142
1143#undef lpfc_debugfs_op_nodelist 1143#undef lpfc_debugfs_op_nodelist
1144static struct file_operations lpfc_debugfs_op_nodelist = { 1144static const struct file_operations lpfc_debugfs_op_nodelist = {
1145 .owner = THIS_MODULE, 1145 .owner = THIS_MODULE,
1146 .open = lpfc_debugfs_nodelist_open, 1146 .open = lpfc_debugfs_nodelist_open,
1147 .llseek = lpfc_debugfs_lseek, 1147 .llseek = lpfc_debugfs_lseek,
@@ -1150,7 +1150,7 @@ static struct file_operations lpfc_debugfs_op_nodelist = {
1150}; 1150};
1151 1151
1152#undef lpfc_debugfs_op_hbqinfo 1152#undef lpfc_debugfs_op_hbqinfo
1153static struct file_operations lpfc_debugfs_op_hbqinfo = { 1153static const struct file_operations lpfc_debugfs_op_hbqinfo = {
1154 .owner = THIS_MODULE, 1154 .owner = THIS_MODULE,
1155 .open = lpfc_debugfs_hbqinfo_open, 1155 .open = lpfc_debugfs_hbqinfo_open,
1156 .llseek = lpfc_debugfs_lseek, 1156 .llseek = lpfc_debugfs_lseek,
@@ -1159,7 +1159,7 @@ static struct file_operations lpfc_debugfs_op_hbqinfo = {
1159}; 1159};
1160 1160
1161#undef lpfc_debugfs_op_dumpHBASlim 1161#undef lpfc_debugfs_op_dumpHBASlim
1162static struct file_operations lpfc_debugfs_op_dumpHBASlim = { 1162static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
1163 .owner = THIS_MODULE, 1163 .owner = THIS_MODULE,
1164 .open = lpfc_debugfs_dumpHBASlim_open, 1164 .open = lpfc_debugfs_dumpHBASlim_open,
1165 .llseek = lpfc_debugfs_lseek, 1165 .llseek = lpfc_debugfs_lseek,
@@ -1168,7 +1168,7 @@ static struct file_operations lpfc_debugfs_op_dumpHBASlim = {
1168}; 1168};
1169 1169
1170#undef lpfc_debugfs_op_dumpHostSlim 1170#undef lpfc_debugfs_op_dumpHostSlim
1171static struct file_operations lpfc_debugfs_op_dumpHostSlim = { 1171static const struct file_operations lpfc_debugfs_op_dumpHostSlim = {
1172 .owner = THIS_MODULE, 1172 .owner = THIS_MODULE,
1173 .open = lpfc_debugfs_dumpHostSlim_open, 1173 .open = lpfc_debugfs_dumpHostSlim_open,
1174 .llseek = lpfc_debugfs_lseek, 1174 .llseek = lpfc_debugfs_lseek,
@@ -1177,7 +1177,7 @@ static struct file_operations lpfc_debugfs_op_dumpHostSlim = {
1177}; 1177};
1178 1178
1179#undef lpfc_debugfs_op_dumpData 1179#undef lpfc_debugfs_op_dumpData
1180static struct file_operations lpfc_debugfs_op_dumpData = { 1180static const struct file_operations lpfc_debugfs_op_dumpData = {
1181 .owner = THIS_MODULE, 1181 .owner = THIS_MODULE,
1182 .open = lpfc_debugfs_dumpData_open, 1182 .open = lpfc_debugfs_dumpData_open,
1183 .llseek = lpfc_debugfs_lseek, 1183 .llseek = lpfc_debugfs_lseek,
@@ -1187,7 +1187,7 @@ static struct file_operations lpfc_debugfs_op_dumpData = {
1187}; 1187};
1188 1188
1189#undef lpfc_debugfs_op_dumpDif 1189#undef lpfc_debugfs_op_dumpDif
1190static struct file_operations lpfc_debugfs_op_dumpDif = { 1190static const struct file_operations lpfc_debugfs_op_dumpDif = {
1191 .owner = THIS_MODULE, 1191 .owner = THIS_MODULE,
1192 .open = lpfc_debugfs_dumpDif_open, 1192 .open = lpfc_debugfs_dumpDif_open,
1193 .llseek = lpfc_debugfs_lseek, 1193 .llseek = lpfc_debugfs_lseek,
@@ -1197,7 +1197,7 @@ static struct file_operations lpfc_debugfs_op_dumpDif = {
1197}; 1197};
1198 1198
1199#undef lpfc_debugfs_op_slow_ring_trc 1199#undef lpfc_debugfs_op_slow_ring_trc
1200static struct file_operations lpfc_debugfs_op_slow_ring_trc = { 1200static const struct file_operations lpfc_debugfs_op_slow_ring_trc = {
1201 .owner = THIS_MODULE, 1201 .owner = THIS_MODULE,
1202 .open = lpfc_debugfs_slow_ring_trc_open, 1202 .open = lpfc_debugfs_slow_ring_trc_open,
1203 .llseek = lpfc_debugfs_lseek, 1203 .llseek = lpfc_debugfs_lseek,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b103b6ed4970..b1bd3fc7bae8 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1357,7 +1357,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1357 1357
1358 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1358 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1359 0x10, 0x1); 1359 0x10, 0x1);
1360 cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24 1360 cmd->result = DRIVER_SENSE << 24
1361 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1361 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1362 phba->bg_guard_err_cnt++; 1362 phba->bg_guard_err_cnt++;
1363 printk(KERN_ERR "BLKGRD: guard_tag error\n"); 1363 printk(KERN_ERR "BLKGRD: guard_tag error\n");
@@ -1368,7 +1368,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1368 1368
1369 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1369 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1370 0x10, 0x3); 1370 0x10, 0x3);
1371 cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24 1371 cmd->result = DRIVER_SENSE << 24
1372 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1372 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1373 1373
1374 phba->bg_reftag_err_cnt++; 1374 phba->bg_reftag_err_cnt++;
@@ -1380,7 +1380,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1380 1380
1381 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1381 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1382 0x10, 0x2); 1382 0x10, 0x2);
1383 cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24 1383 cmd->result = DRIVER_SENSE << 24
1384 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1384 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1385 1385
1386 phba->bg_apptag_err_cnt++; 1386 phba->bg_apptag_err_cnt++;
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig
new file mode 100644
index 000000000000..4a86855c23b3
--- /dev/null
+++ b/drivers/scsi/mpt2sas/Kconfig
@@ -0,0 +1,66 @@
1#
2# Kernel configuration file for the MPT2SAS
3#
4# This code is based on drivers/scsi/mpt2sas/Kconfig
5# Copyright (C) 2007-2008 LSI Corporation
6# (mailto:DL-MPTFusionLinux@lsi.com)
7
8# This program is free software; you can redistribute it and/or
9# modify it under the terms of the GNU General Public License
10# as published by the Free Software Foundation; either version 2
11# of the License, or (at your option) any later version.
12
13# This program is distributed in the hope that it will be useful,
14# but WITHOUT ANY WARRANTY; without even the implied warranty of
15# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16# GNU General Public License for more details.
17
18# NO WARRANTY
19# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23# solely responsible for determining the appropriateness of using and
24# distributing the Program and assumes all risks associated with its
25# exercise of rights under this Agreement, including but not limited to
26# the risks and costs of program errors, damage to or loss of data,
27# programs or equipment, and unavailability or interruption of operations.
28
29# DISCLAIMER OF LIABILITY
30# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37
38# You should have received a copy of the GNU General Public License
39# along with this program; if not, write to the Free Software
40# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41# USA.
42
43config SCSI_MPT2SAS
44 tristate "LSI MPT Fusion SAS 2.0 Device Driver"
45 depends on PCI && SCSI
46 select SCSI_SAS_ATTRS
47 ---help---
48 This driver supports PCI-Express SAS 6Gb/s Host Adapters.
49
50config SCSI_MPT2SAS_MAX_SGE
51 int "LSI MPT Fusion Max number of SG Entries (16 - 128)"
52 depends on PCI && SCSI && SCSI_MPT2SAS
53 default "128"
54 range 16 128
55 ---help---
56 This option allows you to specify the maximum number of scatter-
57 gather entries per I/O. The driver default is 128, which matches
58 SAFE_PHYS_SEGMENTS. However, it may decreased down to 16.
59 Decreasing this parameter will reduce memory requirements
60 on a per controller instance.
61
62config SCSI_MPT2SAS_LOGGING
63 bool "LSI MPT Fusion logging facility"
64 depends on PCI && SCSI && SCSI_MPT2SAS
65 ---help---
66 This turns on a logging facility.
diff --git a/drivers/scsi/mpt2sas/Makefile b/drivers/scsi/mpt2sas/Makefile
new file mode 100644
index 000000000000..728f0475711d
--- /dev/null
+++ b/drivers/scsi/mpt2sas/Makefile
@@ -0,0 +1,7 @@
1# mpt2sas makefile
2obj-$(CONFIG_SCSI_MPT2SAS) += mpt2sas.o
3mpt2sas-y += mpt2sas_base.o \
4 mpt2sas_config.o \
5 mpt2sas_scsih.o \
6 mpt2sas_transport.o \
7 mpt2sas_ctl.o
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
new file mode 100644
index 000000000000..7bb2ece8b2e4
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -0,0 +1,1067 @@
1/*
2 * Copyright (c) 2000-2009 LSI Corporation.
3 *
4 *
5 * Name: mpi2.h
6 * Title: MPI Message independent structures and definitions
7 * including System Interface Register Set and
8 * scatter/gather formats.
9 * Creation Date: June 21, 2006
10 *
11 * mpi2.h Version: 02.00.11
12 *
13 * Version History
14 * ---------------
15 *
16 * Date Version Description
17 * -------- -------- ------------------------------------------------------
18 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
19 * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT.
20 * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT.
21 * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT.
22 * Moved ReplyPostHostIndex register to offset 0x6C of the
23 * MPI2_SYSTEM_INTERFACE_REGS and modified the define for
24 * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
25 * Added union of request descriptors.
26 * Added union of reply descriptors.
27 * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT.
28 * Added define for MPI2_VERSION_02_00.
29 * Fixed the size of the FunctionDependent5 field in the
30 * MPI2_DEFAULT_REPLY structure.
31 * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT.
32 * Removed the MPI-defined Fault Codes and extended the
33 * product specific codes up to 0xEFFF.
34 * Added a sixth key value for the WriteSequence register
35 * and changed the flush value to 0x0.
36 * Added message function codes for Diagnostic Buffer Post
37 * and Diagnsotic Release.
38 * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
39 * Moved MPI2_VERSION_UNION from mpi2_ioc.h.
40 * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT.
41 * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT.
42 * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT.
43 * Added #defines for marking a reply descriptor as unused.
44 * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT.
45 * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT.
46 * Moved LUN field defines from mpi2_init.h.
47 * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT.
48 * --------------------------------------------------------------------------
49 */
50
51#ifndef MPI2_H
52#define MPI2_H
53
54
55/*****************************************************************************
56*
57* MPI Version Definitions
58*
59*****************************************************************************/
60
61#define MPI2_VERSION_MAJOR (0x02)
62#define MPI2_VERSION_MINOR (0x00)
63#define MPI2_VERSION_MAJOR_MASK (0xFF00)
64#define MPI2_VERSION_MAJOR_SHIFT (8)
65#define MPI2_VERSION_MINOR_MASK (0x00FF)
66#define MPI2_VERSION_MINOR_SHIFT (0)
67#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
68 MPI2_VERSION_MINOR)
69
70#define MPI2_VERSION_02_00 (0x0200)
71
72/* versioning for this MPI header set */
73#define MPI2_HEADER_VERSION_UNIT (0x0B)
74#define MPI2_HEADER_VERSION_DEV (0x00)
75#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
76#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
77#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
78#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
79#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | MPI2_HEADER_VERSION_DEV)
80
81
82/*****************************************************************************
83*
84* IOC State Definitions
85*
86*****************************************************************************/
87
88#define MPI2_IOC_STATE_RESET (0x00000000)
89#define MPI2_IOC_STATE_READY (0x10000000)
90#define MPI2_IOC_STATE_OPERATIONAL (0x20000000)
91#define MPI2_IOC_STATE_FAULT (0x40000000)
92
93#define MPI2_IOC_STATE_MASK (0xF0000000)
94#define MPI2_IOC_STATE_SHIFT (28)
95
96/* Fault state range for prodcut specific codes */
97#define MPI2_FAULT_PRODUCT_SPECIFIC_MIN (0x0000)
98#define MPI2_FAULT_PRODUCT_SPECIFIC_MAX (0xEFFF)
99
100
101/*****************************************************************************
102*
103* System Interface Register Definitions
104*
105*****************************************************************************/
106
107typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS
108{
109 U32 Doorbell; /* 0x00 */
110 U32 WriteSequence; /* 0x04 */
111 U32 HostDiagnostic; /* 0x08 */
112 U32 Reserved1; /* 0x0C */
113 U32 DiagRWData; /* 0x10 */
114 U32 DiagRWAddressLow; /* 0x14 */
115 U32 DiagRWAddressHigh; /* 0x18 */
116 U32 Reserved2[5]; /* 0x1C */
117 U32 HostInterruptStatus; /* 0x30 */
118 U32 HostInterruptMask; /* 0x34 */
119 U32 DCRData; /* 0x38 */
120 U32 DCRAddress; /* 0x3C */
121 U32 Reserved3[2]; /* 0x40 */
122 U32 ReplyFreeHostIndex; /* 0x48 */
123 U32 Reserved4[8]; /* 0x4C */
124 U32 ReplyPostHostIndex; /* 0x6C */
125 U32 Reserved5; /* 0x70 */
126 U32 HCBSize; /* 0x74 */
127 U32 HCBAddressLow; /* 0x78 */
128 U32 HCBAddressHigh; /* 0x7C */
129 U32 Reserved6[16]; /* 0x80 */
130 U32 RequestDescriptorPostLow; /* 0xC0 */
131 U32 RequestDescriptorPostHigh; /* 0xC4 */
132 U32 Reserved7[14]; /* 0xC8 */
133} MPI2_SYSTEM_INTERFACE_REGS, MPI2_POINTER PTR_MPI2_SYSTEM_INTERFACE_REGS,
134 Mpi2SystemInterfaceRegs_t, MPI2_POINTER pMpi2SystemInterfaceRegs_t;
135
136/*
137 * Defines for working with the Doorbell register.
138 */
139#define MPI2_DOORBELL_OFFSET (0x00000000)
140
141/* IOC --> System values */
142#define MPI2_DOORBELL_USED (0x08000000)
143#define MPI2_DOORBELL_WHO_INIT_MASK (0x07000000)
144#define MPI2_DOORBELL_WHO_INIT_SHIFT (24)
145#define MPI2_DOORBELL_FAULT_CODE_MASK (0x0000FFFF)
146#define MPI2_DOORBELL_DATA_MASK (0x0000FFFF)
147
148/* System --> IOC values */
149#define MPI2_DOORBELL_FUNCTION_MASK (0xFF000000)
150#define MPI2_DOORBELL_FUNCTION_SHIFT (24)
151#define MPI2_DOORBELL_ADD_DWORDS_MASK (0x00FF0000)
152#define MPI2_DOORBELL_ADD_DWORDS_SHIFT (16)
153
154
155/*
156 * Defines for the WriteSequence register
157 */
158#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
159#define MPI2_WRSEQ_KEY_VALUE_MASK (0x0000000F)
160#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
161#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
162#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
163#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
164#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
165#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
166#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
167
168/*
169 * Defines for the HostDiagnostic register
170 */
171#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008)
172
173#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800)
174#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000)
175#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800)
176
177#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400)
178#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200)
179#define MPI2_DIAG_HCB_MODE (0x00000100)
180#define MPI2_DIAG_DIAG_WRITE_ENABLE (0x00000080)
181#define MPI2_DIAG_FLASH_BAD_SIG (0x00000040)
182#define MPI2_DIAG_RESET_HISTORY (0x00000020)
183#define MPI2_DIAG_DIAG_RW_ENABLE (0x00000010)
184#define MPI2_DIAG_RESET_ADAPTER (0x00000004)
185#define MPI2_DIAG_HOLD_IOC_RESET (0x00000002)
186
187/*
188 * Offsets for DiagRWData and address
189 */
190#define MPI2_DIAG_RW_DATA_OFFSET (0x00000010)
191#define MPI2_DIAG_RW_ADDRESS_LOW_OFFSET (0x00000014)
192#define MPI2_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00000018)
193
194/*
195 * Defines for the HostInterruptStatus register
196 */
197#define MPI2_HOST_INTERRUPT_STATUS_OFFSET (0x00000030)
198#define MPI2_HIS_SYS2IOC_DB_STATUS (0x80000000)
199#define MPI2_HIS_IOP_DOORBELL_STATUS MPI2_HIS_SYS2IOC_DB_STATUS
200#define MPI2_HIS_RESET_IRQ_STATUS (0x40000000)
201#define MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT (0x00000008)
202#define MPI2_HIS_IOC2SYS_DB_STATUS (0x00000001)
203#define MPI2_HIS_DOORBELL_INTERRUPT MPI2_HIS_IOC2SYS_DB_STATUS
204
205/*
206 * Defines for the HostInterruptMask register
207 */
208#define MPI2_HOST_INTERRUPT_MASK_OFFSET (0x00000034)
209#define MPI2_HIM_RESET_IRQ_MASK (0x40000000)
210#define MPI2_HIM_REPLY_INT_MASK (0x00000008)
211#define MPI2_HIM_RIM MPI2_HIM_REPLY_INT_MASK
212#define MPI2_HIM_IOC2SYS_DB_MASK (0x00000001)
213#define MPI2_HIM_DIM MPI2_HIM_IOC2SYS_DB_MASK
214
215/*
216 * Offsets for DCRData and address
217 */
218#define MPI2_DCR_DATA_OFFSET (0x00000038)
219#define MPI2_DCR_ADDRESS_OFFSET (0x0000003C)
220
221/*
222 * Offset for the Reply Free Queue
223 */
224#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048)
225
226/*
227 * Offset for the Reply Descriptor Post Queue
228 */
229#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
230
231/*
232 * Defines for the HCBSize and address
233 */
234#define MPI2_HCB_SIZE_OFFSET (0x00000074)
235#define MPI2_HCB_SIZE_SIZE_MASK (0xFFFFF000)
236#define MPI2_HCB_SIZE_HCB_ENABLE (0x00000001)
237
238#define MPI2_HCB_ADDRESS_LOW_OFFSET (0x00000078)
239#define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C)
240
241/*
242 * Offsets for the Request Queue
243 */
244#define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0)
245#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4)
246
247
248/*****************************************************************************
249*
250* Message Descriptors
251*
252*****************************************************************************/
253
254/* Request Descriptors */
255
256/* Default Request Descriptor */
257typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR
258{
259 U8 RequestFlags; /* 0x00 */
260 U8 VF_ID; /* 0x01 */
261 U16 SMID; /* 0x02 */
262 U16 LMID; /* 0x04 */
263 U16 DescriptorTypeDependent; /* 0x06 */
264} MPI2_DEFAULT_REQUEST_DESCRIPTOR,
265 MPI2_POINTER PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR,
266 Mpi2DefaultRequestDescriptor_t, MPI2_POINTER pMpi2DefaultRequestDescriptor_t;
267
268/* defines for the RequestFlags field */
269#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
270#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
271#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02)
272#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
273#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08)
274
275#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
276
277
278/* High Priority Request Descriptor */
279typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR
280{
281 U8 RequestFlags; /* 0x00 */
282 U8 VF_ID; /* 0x01 */
283 U16 SMID; /* 0x02 */
284 U16 LMID; /* 0x04 */
285 U16 Reserved1; /* 0x06 */
286} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
287 MPI2_POINTER PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
288 Mpi2HighPriorityRequestDescriptor_t,
289 MPI2_POINTER pMpi2HighPriorityRequestDescriptor_t;
290
291
292/* SCSI IO Request Descriptor */
293typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR
294{
295 U8 RequestFlags; /* 0x00 */
296 U8 VF_ID; /* 0x01 */
297 U16 SMID; /* 0x02 */
298 U16 LMID; /* 0x04 */
299 U16 DevHandle; /* 0x06 */
300} MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
301 MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
302 Mpi2SCSIIORequestDescriptor_t, MPI2_POINTER pMpi2SCSIIORequestDescriptor_t;
303
304
305/* SCSI Target Request Descriptor */
306typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR
307{
308 U8 RequestFlags; /* 0x00 */
309 U8 VF_ID; /* 0x01 */
310 U16 SMID; /* 0x02 */
311 U16 LMID; /* 0x04 */
312 U16 IoIndex; /* 0x06 */
313} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
314 MPI2_POINTER PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
315 Mpi2SCSITargetRequestDescriptor_t,
316 MPI2_POINTER pMpi2SCSITargetRequestDescriptor_t;
317
318/* union of Request Descriptors */
319typedef union _MPI2_REQUEST_DESCRIPTOR_UNION
320{
321 MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
322 MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
323 MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
324 MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
325 U64 Words;
326} MPI2_REQUEST_DESCRIPTOR_UNION, MPI2_POINTER PTR_MPI2_REQUEST_DESCRIPTOR_UNION,
327 Mpi2RequestDescriptorUnion_t, MPI2_POINTER pMpi2RequestDescriptorUnion_t;
328
329
330/* Reply Descriptors */
331
332/* Default Reply Descriptor */
333typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR
334{
335 U8 ReplyFlags; /* 0x00 */
336 U8 VF_ID; /* 0x01 */
337 U16 DescriptorTypeDependent1; /* 0x02 */
338 U32 DescriptorTypeDependent2; /* 0x04 */
339} MPI2_DEFAULT_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR,
340 Mpi2DefaultReplyDescriptor_t, MPI2_POINTER pMpi2DefaultReplyDescriptor_t;
341
342/* defines for the ReplyFlags field */
343#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
344#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
345#define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01)
346#define MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS (0x02)
347#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03)
348#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
349
350/* values for marking a reply descriptor as unused */
351#define MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF)
352#define MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK (0xFFFFFFFF)
353
354/* Address Reply Descriptor */
355typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR
356{
357 U8 ReplyFlags; /* 0x00 */
358 U8 VF_ID; /* 0x01 */
359 U16 SMID; /* 0x02 */
360 U32 ReplyFrameAddress; /* 0x04 */
361} MPI2_ADDRESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR,
362 Mpi2AddressReplyDescriptor_t, MPI2_POINTER pMpi2AddressReplyDescriptor_t;
363
364#define MPI2_ADDRESS_REPLY_SMID_INVALID (0x00)
365
366
367/* SCSI IO Success Reply Descriptor */
368typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
369{
370 U8 ReplyFlags; /* 0x00 */
371 U8 VF_ID; /* 0x01 */
372 U16 SMID; /* 0x02 */
373 U16 TaskTag; /* 0x04 */
374 U16 DevHandle; /* 0x06 */
375} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
376 MPI2_POINTER PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
377 Mpi2SCSIIOSuccessReplyDescriptor_t,
378 MPI2_POINTER pMpi2SCSIIOSuccessReplyDescriptor_t;
379
380
381/* TargetAssist Success Reply Descriptor */
382typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR
383{
384 U8 ReplyFlags; /* 0x00 */
385 U8 VF_ID; /* 0x01 */
386 U16 SMID; /* 0x02 */
387 U8 SequenceNumber; /* 0x04 */
388 U8 Reserved1; /* 0x05 */
389 U16 IoIndex; /* 0x06 */
390} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
391 MPI2_POINTER PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
392 Mpi2TargetAssistSuccessReplyDescriptor_t,
393 MPI2_POINTER pMpi2TargetAssistSuccessReplyDescriptor_t;
394
395
396/* Target Command Buffer Reply Descriptor */
397typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR
398{
399 U8 ReplyFlags; /* 0x00 */
400 U8 VF_ID; /* 0x01 */
401 U8 VP_ID; /* 0x02 */
402 U8 Flags; /* 0x03 */
403 U16 InitiatorDevHandle; /* 0x04 */
404 U16 IoIndex; /* 0x06 */
405} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
406 MPI2_POINTER PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
407 Mpi2TargetCommandBufferReplyDescriptor_t,
408 MPI2_POINTER pMpi2TargetCommandBufferReplyDescriptor_t;
409
410/* defines for Flags field */
411#define MPI2_RPY_DESCRIPT_TCB_FLAGS_PHYNUM_MASK (0x3F)
412
413
414/* union of Reply Descriptors */
415typedef union _MPI2_REPLY_DESCRIPTORS_UNION
416{
417 MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
418 MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
419 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
420 MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
421 MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
422 U64 Words;
423} MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION,
424 Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t;
425
426
427
428/*****************************************************************************
429*
430* Message Functions
431* 0x80 -> 0x8F reserved for private message use per product
432*
433*
434*****************************************************************************/
435
436#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
437#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) /* SCSI Task Management */
438#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */
439#define MPI2_FUNCTION_IOC_FACTS (0x03) /* IOC Facts */
440#define MPI2_FUNCTION_CONFIG (0x04) /* Configuration */
441#define MPI2_FUNCTION_PORT_FACTS (0x05) /* Port Facts */
442#define MPI2_FUNCTION_PORT_ENABLE (0x06) /* Port Enable */
443#define MPI2_FUNCTION_EVENT_NOTIFICATION (0x07) /* Event Notification */
444#define MPI2_FUNCTION_EVENT_ACK (0x08) /* Event Acknowledge */
445#define MPI2_FUNCTION_FW_DOWNLOAD (0x09) /* FW Download */
446#define MPI2_FUNCTION_TARGET_ASSIST (0x0B) /* Target Assist */
447#define MPI2_FUNCTION_TARGET_STATUS_SEND (0x0C) /* Target Status Send */
448#define MPI2_FUNCTION_TARGET_MODE_ABORT (0x0D) /* Target Mode Abort */
449#define MPI2_FUNCTION_FW_UPLOAD (0x12) /* FW Upload */
450#define MPI2_FUNCTION_RAID_ACTION (0x15) /* RAID Action */
451#define MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16) /* SCSI IO RAID Passthrough */
452#define MPI2_FUNCTION_TOOLBOX (0x17) /* Toolbox */
453#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18) /* SCSI Enclosure Processor */
454#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A) /* SMP Passthrough */
455#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B) /* SAS IO Unit Control */
456#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C) /* SATA Passthrough */
457#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D) /* Diagnostic Buffer Post */
458#define MPI2_FUNCTION_DIAG_RELEASE (0x1E) /* Diagnostic Release */
459#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) /* Target Command Buffer Post Base */
460#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) /* Target Command Buffer Post List */
461
462
463
464/* Doorbell functions */
465#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40)
466/* #define MPI2_FUNCTION_IO_UNIT_RESET (0x41) */
467#define MPI2_FUNCTION_HANDSHAKE (0x42)
468
469
470/*****************************************************************************
471*
472* IOC Status Values
473*
474*****************************************************************************/
475
476/* mask for IOCStatus status value */
477#define MPI2_IOCSTATUS_MASK (0x7FFF)
478
479/****************************************************************************
480* Common IOCStatus values for all replies
481****************************************************************************/
482
483#define MPI2_IOCSTATUS_SUCCESS (0x0000)
484#define MPI2_IOCSTATUS_INVALID_FUNCTION (0x0001)
485#define MPI2_IOCSTATUS_BUSY (0x0002)
486#define MPI2_IOCSTATUS_INVALID_SGL (0x0003)
487#define MPI2_IOCSTATUS_INTERNAL_ERROR (0x0004)
488#define MPI2_IOCSTATUS_INVALID_VPID (0x0005)
489#define MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
490#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007)
491#define MPI2_IOCSTATUS_INVALID_STATE (0x0008)
492#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009)
493
494/****************************************************************************
495* Config IOCStatus values
496****************************************************************************/
497
498#define MPI2_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
499#define MPI2_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
500#define MPI2_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022)
501#define MPI2_IOCSTATUS_CONFIG_INVALID_DATA (0x0023)
502#define MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024)
503#define MPI2_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025)
504
505/****************************************************************************
506* SCSI IO Reply
507****************************************************************************/
508
509#define MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040)
510#define MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042)
511#define MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043)
512#define MPI2_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044)
513#define MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045)
514#define MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046)
515#define MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047)
516#define MPI2_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048)
517#define MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049)
518#define MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A)
519#define MPI2_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B)
520#define MPI2_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C)
521
522/****************************************************************************
523* For use by SCSI Initiator and SCSI Target end-to-end data protection
524****************************************************************************/
525
526#define MPI2_IOCSTATUS_EEDP_GUARD_ERROR (0x004D)
527#define MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E)
528#define MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F)
529
530/****************************************************************************
531* SCSI Target values
532****************************************************************************/
533
534#define MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062)
535#define MPI2_IOCSTATUS_TARGET_ABORTED (0x0063)
536#define MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064)
537#define MPI2_IOCSTATUS_TARGET_NO_CONNECTION (0x0065)
538#define MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A)
539#define MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D)
540#define MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E)
541#define MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F)
542#define MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070)
543#define MPI2_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071)
544
545/****************************************************************************
546* Serial Attached SCSI values
547****************************************************************************/
548
549#define MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090)
550#define MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091)
551
552/****************************************************************************
553* Diagnostic Buffer Post / Diagnostic Release values
554****************************************************************************/
555
556#define MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0)
557
558
559/****************************************************************************
560* IOCStatus flag to indicate that log info is available
561****************************************************************************/
562
563#define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000)
564
565/****************************************************************************
566* IOCLogInfo Types
567****************************************************************************/
568
569#define MPI2_IOCLOGINFO_TYPE_MASK (0xF0000000)
570#define MPI2_IOCLOGINFO_TYPE_SHIFT (28)
571#define MPI2_IOCLOGINFO_TYPE_NONE (0x0)
572#define MPI2_IOCLOGINFO_TYPE_SCSI (0x1)
573#define MPI2_IOCLOGINFO_TYPE_FC (0x2)
574#define MPI2_IOCLOGINFO_TYPE_SAS (0x3)
575#define MPI2_IOCLOGINFO_TYPE_ISCSI (0x4)
576#define MPI2_IOCLOGINFO_LOG_DATA_MASK (0x0FFFFFFF)
577
578
579/*****************************************************************************
580*
581* Standard Message Structures
582*
583*****************************************************************************/
584
585/****************************************************************************
586* Request Message Header for all request messages
587****************************************************************************/
588
589typedef struct _MPI2_REQUEST_HEADER
590{
591 U16 FunctionDependent1; /* 0x00 */
592 U8 ChainOffset; /* 0x02 */
593 U8 Function; /* 0x03 */
594 U16 FunctionDependent2; /* 0x04 */
595 U8 FunctionDependent3; /* 0x06 */
596 U8 MsgFlags; /* 0x07 */
597 U8 VP_ID; /* 0x08 */
598 U8 VF_ID; /* 0x09 */
599 U16 Reserved1; /* 0x0A */
600} MPI2_REQUEST_HEADER, MPI2_POINTER PTR_MPI2_REQUEST_HEADER,
601 MPI2RequestHeader_t, MPI2_POINTER pMPI2RequestHeader_t;
602
603
604/****************************************************************************
605* Default Reply
606****************************************************************************/
607
608typedef struct _MPI2_DEFAULT_REPLY
609{
610 U16 FunctionDependent1; /* 0x00 */
611 U8 MsgLength; /* 0x02 */
612 U8 Function; /* 0x03 */
613 U16 FunctionDependent2; /* 0x04 */
614 U8 FunctionDependent3; /* 0x06 */
615 U8 MsgFlags; /* 0x07 */
616 U8 VP_ID; /* 0x08 */
617 U8 VF_ID; /* 0x09 */
618 U16 Reserved1; /* 0x0A */
619 U16 FunctionDependent5; /* 0x0C */
620 U16 IOCStatus; /* 0x0E */
621 U32 IOCLogInfo; /* 0x10 */
622} MPI2_DEFAULT_REPLY, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY,
623 MPI2DefaultReply_t, MPI2_POINTER pMPI2DefaultReply_t;
624
625
626/* common version structure/union used in messages and configuration pages */
627
628typedef struct _MPI2_VERSION_STRUCT
629{
630 U8 Dev; /* 0x00 */
631 U8 Unit; /* 0x01 */
632 U8 Minor; /* 0x02 */
633 U8 Major; /* 0x03 */
634} MPI2_VERSION_STRUCT;
635
636typedef union _MPI2_VERSION_UNION
637{
638 MPI2_VERSION_STRUCT Struct;
639 U32 Word;
640} MPI2_VERSION_UNION;
641
642
643/* LUN field defines, common to many structures */
644#define MPI2_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF)
645#define MPI2_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000)
646#define MPI2_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF)
647#define MPI2_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000)
648#define MPI2_LUN_LEVEL_1_WORD (0xFF00)
649#define MPI2_LUN_LEVEL_1_DWORD (0x0000FF00)
650
651
652/*****************************************************************************
653*
654* Fusion-MPT MPI Scatter Gather Elements
655*
656*****************************************************************************/
657
658/****************************************************************************
659* MPI Simple Element structures
660****************************************************************************/
661
662typedef struct _MPI2_SGE_SIMPLE32
663{
664 U32 FlagsLength;
665 U32 Address;
666} MPI2_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_SGE_SIMPLE32,
667 Mpi2SGESimple32_t, MPI2_POINTER pMpi2SGESimple32_t;
668
669typedef struct _MPI2_SGE_SIMPLE64
670{
671 U32 FlagsLength;
672 U64 Address;
673} MPI2_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_SGE_SIMPLE64,
674 Mpi2SGESimple64_t, MPI2_POINTER pMpi2SGESimple64_t;
675
676typedef struct _MPI2_SGE_SIMPLE_UNION
677{
678 U32 FlagsLength;
679 union
680 {
681 U32 Address32;
682 U64 Address64;
683 } u;
684} MPI2_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_SIMPLE_UNION,
685 Mpi2SGESimpleUnion_t, MPI2_POINTER pMpi2SGESimpleUnion_t;
686
687
688/****************************************************************************
689* MPI Chain Element structures
690****************************************************************************/
691
692typedef struct _MPI2_SGE_CHAIN32
693{
694 U16 Length;
695 U8 NextChainOffset;
696 U8 Flags;
697 U32 Address;
698} MPI2_SGE_CHAIN32, MPI2_POINTER PTR_MPI2_SGE_CHAIN32,
699 Mpi2SGEChain32_t, MPI2_POINTER pMpi2SGEChain32_t;
700
701typedef struct _MPI2_SGE_CHAIN64
702{
703 U16 Length;
704 U8 NextChainOffset;
705 U8 Flags;
706 U64 Address;
707} MPI2_SGE_CHAIN64, MPI2_POINTER PTR_MPI2_SGE_CHAIN64,
708 Mpi2SGEChain64_t, MPI2_POINTER pMpi2SGEChain64_t;
709
710typedef struct _MPI2_SGE_CHAIN_UNION
711{
712 U16 Length;
713 U8 NextChainOffset;
714 U8 Flags;
715 union
716 {
717 U32 Address32;
718 U64 Address64;
719 } u;
720} MPI2_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_SGE_CHAIN_UNION,
721 Mpi2SGEChainUnion_t, MPI2_POINTER pMpi2SGEChainUnion_t;
722
723
724/****************************************************************************
725* MPI Transaction Context Element structures
726****************************************************************************/
727
728typedef struct _MPI2_SGE_TRANSACTION32
729{
730 U8 Reserved;
731 U8 ContextSize;
732 U8 DetailsLength;
733 U8 Flags;
734 U32 TransactionContext[1];
735 U32 TransactionDetails[1];
736} MPI2_SGE_TRANSACTION32, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION32,
737 Mpi2SGETransaction32_t, MPI2_POINTER pMpi2SGETransaction32_t;
738
739typedef struct _MPI2_SGE_TRANSACTION64
740{
741 U8 Reserved;
742 U8 ContextSize;
743 U8 DetailsLength;
744 U8 Flags;
745 U32 TransactionContext[2];
746 U32 TransactionDetails[1];
747} MPI2_SGE_TRANSACTION64, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION64,
748 Mpi2SGETransaction64_t, MPI2_POINTER pMpi2SGETransaction64_t;
749
750typedef struct _MPI2_SGE_TRANSACTION96
751{
752 U8 Reserved;
753 U8 ContextSize;
754 U8 DetailsLength;
755 U8 Flags;
756 U32 TransactionContext[3];
757 U32 TransactionDetails[1];
758} MPI2_SGE_TRANSACTION96, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION96,
759 Mpi2SGETransaction96_t, MPI2_POINTER pMpi2SGETransaction96_t;
760
761typedef struct _MPI2_SGE_TRANSACTION128
762{
763 U8 Reserved;
764 U8 ContextSize;
765 U8 DetailsLength;
766 U8 Flags;
767 U32 TransactionContext[4];
768 U32 TransactionDetails[1];
769} MPI2_SGE_TRANSACTION128, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION128,
770 Mpi2SGETransaction_t128, MPI2_POINTER pMpi2SGETransaction_t128;
771
772typedef struct _MPI2_SGE_TRANSACTION_UNION
773{
774 U8 Reserved;
775 U8 ContextSize;
776 U8 DetailsLength;
777 U8 Flags;
778 union
779 {
780 U32 TransactionContext32[1];
781 U32 TransactionContext64[2];
782 U32 TransactionContext96[3];
783 U32 TransactionContext128[4];
784 } u;
785 U32 TransactionDetails[1];
786} MPI2_SGE_TRANSACTION_UNION, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION_UNION,
787 Mpi2SGETransactionUnion_t, MPI2_POINTER pMpi2SGETransactionUnion_t;
788
789
790/****************************************************************************
791* MPI SGE union for IO SGL's
792****************************************************************************/
793
794typedef struct _MPI2_MPI_SGE_IO_UNION
795{
796 union
797 {
798 MPI2_SGE_SIMPLE_UNION Simple;
799 MPI2_SGE_CHAIN_UNION Chain;
800 } u;
801} MPI2_MPI_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_MPI_SGE_IO_UNION,
802 Mpi2MpiSGEIOUnion_t, MPI2_POINTER pMpi2MpiSGEIOUnion_t;
803
804
805/****************************************************************************
806* MPI SGE union for SGL's with Simple and Transaction elements
807****************************************************************************/
808
809typedef struct _MPI2_SGE_TRANS_SIMPLE_UNION
810{
811 union
812 {
813 MPI2_SGE_SIMPLE_UNION Simple;
814 MPI2_SGE_TRANSACTION_UNION Transaction;
815 } u;
816} MPI2_SGE_TRANS_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_TRANS_SIMPLE_UNION,
817 Mpi2SGETransSimpleUnion_t, MPI2_POINTER pMpi2SGETransSimpleUnion_t;
818
819
820/****************************************************************************
821* All MPI SGE types union
822****************************************************************************/
823
824typedef struct _MPI2_MPI_SGE_UNION
825{
826 union
827 {
828 MPI2_SGE_SIMPLE_UNION Simple;
829 MPI2_SGE_CHAIN_UNION Chain;
830 MPI2_SGE_TRANSACTION_UNION Transaction;
831 } u;
832} MPI2_MPI_SGE_UNION, MPI2_POINTER PTR_MPI2_MPI_SGE_UNION,
833 Mpi2MpiSgeUnion_t, MPI2_POINTER pMpi2MpiSgeUnion_t;
834
835
836/****************************************************************************
837* MPI SGE field definition and masks
838****************************************************************************/
839
840/* Flags field bit definitions */
841
842#define MPI2_SGE_FLAGS_LAST_ELEMENT (0x80)
843#define MPI2_SGE_FLAGS_END_OF_BUFFER (0x40)
844#define MPI2_SGE_FLAGS_ELEMENT_TYPE_MASK (0x30)
845#define MPI2_SGE_FLAGS_LOCAL_ADDRESS (0x08)
846#define MPI2_SGE_FLAGS_DIRECTION (0x04)
847#define MPI2_SGE_FLAGS_ADDRESS_SIZE (0x02)
848#define MPI2_SGE_FLAGS_END_OF_LIST (0x01)
849
850#define MPI2_SGE_FLAGS_SHIFT (24)
851
852#define MPI2_SGE_LENGTH_MASK (0x00FFFFFF)
853#define MPI2_SGE_CHAIN_LENGTH_MASK (0x0000FFFF)
854
855/* Element Type */
856
857#define MPI2_SGE_FLAGS_TRANSACTION_ELEMENT (0x00)
858#define MPI2_SGE_FLAGS_SIMPLE_ELEMENT (0x10)
859#define MPI2_SGE_FLAGS_CHAIN_ELEMENT (0x30)
860#define MPI2_SGE_FLAGS_ELEMENT_MASK (0x30)
861
862/* Address location */
863
864#define MPI2_SGE_FLAGS_SYSTEM_ADDRESS (0x00)
865
866/* Direction */
867
868#define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00)
869#define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04)
870
871/* Address Size */
872
873#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00)
874#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
875
876/* Context Size */
877
878#define MPI2_SGE_FLAGS_32_BIT_CONTEXT (0x00)
879#define MPI2_SGE_FLAGS_64_BIT_CONTEXT (0x02)
880#define MPI2_SGE_FLAGS_96_BIT_CONTEXT (0x04)
881#define MPI2_SGE_FLAGS_128_BIT_CONTEXT (0x06)
882
883#define MPI2_SGE_CHAIN_OFFSET_MASK (0x00FF0000)
884#define MPI2_SGE_CHAIN_OFFSET_SHIFT (16)
885
886/****************************************************************************
887* MPI SGE operation Macros
888****************************************************************************/
889
890/* SIMPLE FlagsLength manipulations... */
891#define MPI2_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_SGE_FLAGS_SHIFT)
892#define MPI2_SGE_GET_FLAGS(f) (((f) & ~MPI2_SGE_LENGTH_MASK) >> MPI2_SGE_FLAGS_SHIFT)
893#define MPI2_SGE_LENGTH(f) ((f) & MPI2_SGE_LENGTH_MASK)
894#define MPI2_SGE_CHAIN_LENGTH(f) ((f) & MPI2_SGE_CHAIN_LENGTH_MASK)
895
896#define MPI2_SGE_SET_FLAGS_LENGTH(f,l) (MPI2_SGE_SET_FLAGS(f) | MPI2_SGE_LENGTH(l))
897
898#define MPI2_pSGE_GET_FLAGS(psg) MPI2_SGE_GET_FLAGS((psg)->FlagsLength)
899#define MPI2_pSGE_GET_LENGTH(psg) MPI2_SGE_LENGTH((psg)->FlagsLength)
900#define MPI2_pSGE_SET_FLAGS_LENGTH(psg,f,l) (psg)->FlagsLength = MPI2_SGE_SET_FLAGS_LENGTH(f,l)
901
902/* CAUTION - The following are READ-MODIFY-WRITE! */
903#define MPI2_pSGE_SET_FLAGS(psg,f) (psg)->FlagsLength |= MPI2_SGE_SET_FLAGS(f)
904#define MPI2_pSGE_SET_LENGTH(psg,l) (psg)->FlagsLength |= MPI2_SGE_LENGTH(l)
905
906#define MPI2_GET_CHAIN_OFFSET(x) ((x & MPI2_SGE_CHAIN_OFFSET_MASK) >> MPI2_SGE_CHAIN_OFFSET_SHIFT)
907
908
909/*****************************************************************************
910*
911* Fusion-MPT IEEE Scatter Gather Elements
912*
913*****************************************************************************/
914
915/****************************************************************************
916* IEEE Simple Element structures
917****************************************************************************/
918
919typedef struct _MPI2_IEEE_SGE_SIMPLE32
920{
921 U32 Address;
922 U32 FlagsLength;
923} MPI2_IEEE_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE32,
924 Mpi2IeeeSgeSimple32_t, MPI2_POINTER pMpi2IeeeSgeSimple32_t;
925
926typedef struct _MPI2_IEEE_SGE_SIMPLE64
927{
928 U64 Address;
929 U32 Length;
930 U16 Reserved1;
931 U8 Reserved2;
932 U8 Flags;
933} MPI2_IEEE_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE64,
934 Mpi2IeeeSgeSimple64_t, MPI2_POINTER pMpi2IeeeSgeSimple64_t;
935
936typedef union _MPI2_IEEE_SGE_SIMPLE_UNION
937{
938 MPI2_IEEE_SGE_SIMPLE32 Simple32;
939 MPI2_IEEE_SGE_SIMPLE64 Simple64;
940} MPI2_IEEE_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE_UNION,
941 Mpi2IeeeSgeSimpleUnion_t, MPI2_POINTER pMpi2IeeeSgeSimpleUnion_t;
942
943
944/****************************************************************************
945* IEEE Chain Element structures
946****************************************************************************/
947
948typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32;
949
950typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64;
951
952typedef union _MPI2_IEEE_SGE_CHAIN_UNION
953{
954 MPI2_IEEE_SGE_CHAIN32 Chain32;
955 MPI2_IEEE_SGE_CHAIN64 Chain64;
956} MPI2_IEEE_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_CHAIN_UNION,
957 Mpi2IeeeSgeChainUnion_t, MPI2_POINTER pMpi2IeeeSgeChainUnion_t;
958
959
960/****************************************************************************
961* All IEEE SGE types union
962****************************************************************************/
963
964typedef struct _MPI2_IEEE_SGE_UNION
965{
966 union
967 {
968 MPI2_IEEE_SGE_SIMPLE_UNION Simple;
969 MPI2_IEEE_SGE_CHAIN_UNION Chain;
970 } u;
971} MPI2_IEEE_SGE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_UNION,
972 Mpi2IeeeSgeUnion_t, MPI2_POINTER pMpi2IeeeSgeUnion_t;
973
974
975/****************************************************************************
976* IEEE SGE field definitions and masks
977****************************************************************************/
978
979/* Flags field bit definitions */
980
981#define MPI2_IEEE_SGE_FLAGS_ELEMENT_TYPE_MASK (0x80)
982
983#define MPI2_IEEE32_SGE_FLAGS_SHIFT (24)
984
985#define MPI2_IEEE32_SGE_LENGTH_MASK (0x00FFFFFF)
986
987/* Element Type */
988
989#define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00)
990#define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
991
992/* Data Location Address Space */
993
994#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03)
995#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
996#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
997#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
998#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
999
1000
1001/****************************************************************************
1002* IEEE SGE operation Macros
1003****************************************************************************/
1004
1005/* SIMPLE FlagsLength manipulations... */
1006#define MPI2_IEEE32_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_IEEE32_SGE_FLAGS_SHIFT)
1007#define MPI2_IEEE32_SGE_GET_FLAGS(f) (((f) & ~MPI2_IEEE32_SGE_LENGTH_MASK) >> MPI2_IEEE32_SGE_FLAGS_SHIFT)
1008#define MPI2_IEEE32_SGE_LENGTH(f) ((f) & MPI2_IEEE32_SGE_LENGTH_MASK)
1009
1010#define MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_IEEE32_SGE_SET_FLAGS(f) | MPI2_IEEE32_SGE_LENGTH(l))
1011
1012#define MPI2_IEEE32_pSGE_GET_FLAGS(psg) MPI2_IEEE32_SGE_GET_FLAGS((psg)->FlagsLength)
1013#define MPI2_IEEE32_pSGE_GET_LENGTH(psg) MPI2_IEEE32_SGE_LENGTH((psg)->FlagsLength)
1014#define MPI2_IEEE32_pSGE_SET_FLAGS_LENGTH(psg,f,l) (psg)->FlagsLength = MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f,l)
1015
1016/* CAUTION - The following are READ-MODIFY-WRITE! */
1017#define MPI2_IEEE32_pSGE_SET_FLAGS(psg,f) (psg)->FlagsLength |= MPI2_IEEE32_SGE_SET_FLAGS(f)
1018#define MPI2_IEEE32_pSGE_SET_LENGTH(psg,l) (psg)->FlagsLength |= MPI2_IEEE32_SGE_LENGTH(l)
1019
1020
1021
1022
1023/*****************************************************************************
1024*
1025* Fusion-MPT MPI/IEEE Scatter Gather Unions
1026*
1027*****************************************************************************/
1028
1029typedef union _MPI2_SIMPLE_SGE_UNION
1030{
1031 MPI2_SGE_SIMPLE_UNION MpiSimple;
1032 MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
1033} MPI2_SIMPLE_SGE_UNION, MPI2_POINTER PTR_MPI2_SIMPLE_SGE_UNION,
1034 Mpi2SimpleSgeUntion_t, MPI2_POINTER pMpi2SimpleSgeUntion_t;
1035
1036
1037typedef union _MPI2_SGE_IO_UNION
1038{
1039 MPI2_SGE_SIMPLE_UNION MpiSimple;
1040 MPI2_SGE_CHAIN_UNION MpiChain;
1041 MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
1042 MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
1043} MPI2_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_SGE_IO_UNION,
1044 Mpi2SGEIOUnion_t, MPI2_POINTER pMpi2SGEIOUnion_t;
1045
1046
1047/****************************************************************************
1048*
1049* Values for SGLFlags field, used in many request messages with an SGL
1050*
1051****************************************************************************/
1052
1053/* values for MPI SGL Data Location Address Space subfield */
1054#define MPI2_SGLFLAGS_ADDRESS_SPACE_MASK (0x0C)
1055#define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00)
1056#define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04)
1057#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08)
1058#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C)
1059/* values for SGL Type subfield */
1060#define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03)
1061#define MPI2_SGLFLAGS_SGL_TYPE_MPI (0x00)
1062#define MPI2_SGLFLAGS_SGL_TYPE_IEEE32 (0x01)
1063#define MPI2_SGLFLAGS_SGL_TYPE_IEEE64 (0x02)
1064
1065
1066#endif
1067
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
new file mode 100644
index 000000000000..2f27cf6d6c65
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -0,0 +1,2151 @@
1/*
2 * Copyright (c) 2000-2009 LSI Corporation.
3 *
4 *
5 * Name: mpi2_cnfg.h
6 * Title: MPI Configuration messages and pages
7 * Creation Date: November 10, 2006
8 *
9 * mpi2_cnfg.h Version: 02.00.10
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
17 * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags.
18 * Added Manufacturing Page 11.
19 * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE
20 * define.
21 * 06-26-07 02.00.02 Adding generic structure for product-specific
22 * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS.
23 * Rework of BIOS Page 2 configuration page.
24 * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the
25 * forms.
26 * Added configuration pages IOC Page 8 and Driver
27 * Persistent Mapping Page 0.
28 * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated
29 * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1,
30 * RAID Physical Disk Pages 0 and 1, RAID Configuration
31 * Page 0).
32 * Added new value for AccessStatus field of SAS Device
33 * Page 0 (_SATA_NEEDS_INITIALIZATION).
34 * 10-31-07 02.00.04 Added missing SEPDevHandle field to
35 * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
36 * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for
37 * NVDATA.
38 * Modified IOC Page 7 to use masks and added field for
39 * SASBroadcastPrimitiveMasks.
40 * Added MPI2_CONFIG_PAGE_BIOS_4.
41 * Added MPI2_CONFIG_PAGE_LOG_0.
42 * 02-29-08 02.00.06 Modified various names to make them 32-character unique.
43 * Added SAS Device IDs.
44 * Updated Integrated RAID configuration pages including
45 * Manufacturing Page 4, IOC Page 6, and RAID Configuration
46 * Page 0.
47 * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA.
48 * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION.
49 * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING.
50 * Added missing MaxNumRoutedSasAddresses field to
51 * MPI2_CONFIG_PAGE_EXPANDER_0.
52 * Added SAS Port Page 0.
53 * Modified structure layout for
54 * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0.
55 * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use
56 * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array.
57 * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF
58 * to 0x000000FF.
59 * Added two new values for the Physical Disk Coercion Size
60 * bits in the Flags field of Manufacturing Page 4.
61 * Added product-specific Manufacturing pages 16 to 31.
62 * Modified Flags bits for controlling write cache on SATA
63 * drives in IO Unit Page 1.
64 * Added new bit to AdditionalControlFlags of SAS IO Unit
65 * Page 1 to control Invalid Topology Correction.
66 * Added additional defines for RAID Volume Page 0
67 * VolumeStatusFlags field.
68 * Modified meaning of RAID Volume Page 0 VolumeSettings
69 * define for auto-configure of hot-swap drives.
70 * Added SupportedPhysDisks field to RAID Volume Page 1 and
71 * added related defines.
72 * Added PhysDiskAttributes field (and related defines) to
73 * RAID Physical Disk Page 0.
74 * Added MPI2_SAS_PHYINFO_PHY_VACANT define.
75 * Added three new DiscoveryStatus bits for SAS IO Unit
76 * Page 0 and SAS Expander Page 0.
77 * Removed multiplexing information from SAS IO Unit pages.
78 * Added BootDeviceWaitTime field to SAS IO Unit Page 4.
79 * Removed Zone Address Resolved bit from PhyInfo and from
80 * Expander Page 0 Flags field.
81 * Added two new AccessStatus values to SAS Device Page 0
82 * for indicating routing problems. Added 3 reserved words
83 * to this page.
84 * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3.
85 * Inserted missing reserved field into structure for IOC
86 * Page 6.
87 * Added more pending task bits to RAID Volume Page 0
88 * VolumeStatusFlags defines.
89 * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define.
90 * Added a new DiscoveryStatus bit for SAS IO Unit Page 0
91 * and SAS Expander Page 0 to flag a downstream initiator
92 * when in simplified routing mode.
93 * Removed SATA Init Failure defines for DiscoveryStatus
94 * fields of SAS IO Unit Page 0 and SAS Expander Page 0.
95 * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define.
96 * Added PortGroups, DmaGroup, and ControlGroup fields to
97 * SAS Device Page 0.
98 * --------------------------------------------------------------------------
99 */
100
101#ifndef MPI2_CNFG_H
102#define MPI2_CNFG_H
103
104/*****************************************************************************
105* Configuration Page Header and defines
106*****************************************************************************/
107
108/* Config Page Header */
109typedef struct _MPI2_CONFIG_PAGE_HEADER
110{
111 U8 PageVersion; /* 0x00 */
112 U8 PageLength; /* 0x01 */
113 U8 PageNumber; /* 0x02 */
114 U8 PageType; /* 0x03 */
115} MPI2_CONFIG_PAGE_HEADER, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_HEADER,
116 Mpi2ConfigPageHeader_t, MPI2_POINTER pMpi2ConfigPageHeader_t;
117
118typedef union _MPI2_CONFIG_PAGE_HEADER_UNION
119{
120 MPI2_CONFIG_PAGE_HEADER Struct;
121 U8 Bytes[4];
122 U16 Word16[2];
123 U32 Word32;
124} MPI2_CONFIG_PAGE_HEADER_UNION, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_HEADER_UNION,
125 Mpi2ConfigPageHeaderUnion, MPI2_POINTER pMpi2ConfigPageHeaderUnion;
126
127/* Extended Config Page Header */
128typedef struct _MPI2_CONFIG_EXTENDED_PAGE_HEADER
129{
130 U8 PageVersion; /* 0x00 */
131 U8 Reserved1; /* 0x01 */
132 U8 PageNumber; /* 0x02 */
133 U8 PageType; /* 0x03 */
134 U16 ExtPageLength; /* 0x04 */
135 U8 ExtPageType; /* 0x06 */
136 U8 Reserved2; /* 0x07 */
137} MPI2_CONFIG_EXTENDED_PAGE_HEADER,
138 MPI2_POINTER PTR_MPI2_CONFIG_EXTENDED_PAGE_HEADER,
139 Mpi2ConfigExtendedPageHeader_t, MPI2_POINTER pMpi2ConfigExtendedPageHeader_t;
140
141typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION
142{
143 MPI2_CONFIG_PAGE_HEADER Struct;
144 MPI2_CONFIG_EXTENDED_PAGE_HEADER Ext;
145 U8 Bytes[8];
146 U16 Word16[4];
147 U32 Word32[2];
148} MPI2_CONFIG_EXT_PAGE_HEADER_UNION, MPI2_POINTER PTR_MPI2_CONFIG_EXT_PAGE_HEADER_UNION,
149 Mpi2ConfigPageExtendedHeaderUnion, MPI2_POINTER pMpi2ConfigPageExtendedHeaderUnion;
150
151
152/* PageType field values */
153#define MPI2_CONFIG_PAGEATTR_READ_ONLY (0x00)
154#define MPI2_CONFIG_PAGEATTR_CHANGEABLE (0x10)
155#define MPI2_CONFIG_PAGEATTR_PERSISTENT (0x20)
156#define MPI2_CONFIG_PAGEATTR_MASK (0xF0)
157
158#define MPI2_CONFIG_PAGETYPE_IO_UNIT (0x00)
159#define MPI2_CONFIG_PAGETYPE_IOC (0x01)
160#define MPI2_CONFIG_PAGETYPE_BIOS (0x02)
161#define MPI2_CONFIG_PAGETYPE_RAID_VOLUME (0x08)
162#define MPI2_CONFIG_PAGETYPE_MANUFACTURING (0x09)
163#define MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A)
164#define MPI2_CONFIG_PAGETYPE_EXTENDED (0x0F)
165#define MPI2_CONFIG_PAGETYPE_MASK (0x0F)
166
167#define MPI2_CONFIG_TYPENUM_MASK (0x0FFF)
168
169
170/* ExtPageType field values */
171#define MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10)
172#define MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11)
173#define MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12)
174#define MPI2_CONFIG_EXTPAGETYPE_SAS_PHY (0x13)
175#define MPI2_CONFIG_EXTPAGETYPE_LOG (0x14)
176#define MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15)
177#define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16)
178#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17)
179#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18)
180
181
182/*****************************************************************************
183* PageAddress defines
184*****************************************************************************/
185
186/* RAID Volume PageAddress format */
187#define MPI2_RAID_VOLUME_PGAD_FORM_MASK (0xF0000000)
188#define MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
189#define MPI2_RAID_VOLUME_PGAD_FORM_HANDLE (0x10000000)
190
191#define MPI2_RAID_VOLUME_PGAD_HANDLE_MASK (0x0000FFFF)
192
193
194/* RAID Physical Disk PageAddress format */
195#define MPI2_PHYSDISK_PGAD_FORM_MASK (0xF0000000)
196#define MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM (0x00000000)
197#define MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM (0x10000000)
198#define MPI2_PHYSDISK_PGAD_FORM_DEVHANDLE (0x20000000)
199
200#define MPI2_PHYSDISK_PGAD_PHYSDISKNUM_MASK (0x000000FF)
201#define MPI2_PHYSDISK_PGAD_DEVHANDLE_MASK (0x0000FFFF)
202
203
204/* SAS Expander PageAddress format */
205#define MPI2_SAS_EXPAND_PGAD_FORM_MASK (0xF0000000)
206#define MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL (0x00000000)
207#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM (0x10000000)
208#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL (0x20000000)
209
210#define MPI2_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000FFFF)
211#define MPI2_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00FF0000)
212#define MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16)
213
214
215/* SAS Device PageAddress format */
216#define MPI2_SAS_DEVICE_PGAD_FORM_MASK (0xF0000000)
217#define MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
218#define MPI2_SAS_DEVICE_PGAD_FORM_HANDLE (0x20000000)
219
220#define MPI2_SAS_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF)
221
222
223/* SAS PHY PageAddress format */
224#define MPI2_SAS_PHY_PGAD_FORM_MASK (0xF0000000)
225#define MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000)
226#define MPI2_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX (0x10000000)
227
228#define MPI2_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000FF)
229#define MPI2_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK (0x0000FFFF)
230
231
232/* SAS Port PageAddress format */
233#define MPI2_SASPORT_PGAD_FORM_MASK (0xF0000000)
234#define MPI2_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000)
235#define MPI2_SASPORT_PGAD_FORM_PORT_NUM (0x10000000)
236
237#define MPI2_SASPORT_PGAD_PORTNUMBER_MASK (0x00000FFF)
238
239
240/* SAS Enclosure PageAddress format */
241#define MPI2_SAS_ENCLOS_PGAD_FORM_MASK (0xF0000000)
242#define MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
243#define MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE (0x10000000)
244
245#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF)
246
247
248/* RAID Configuration PageAddress format */
249#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000)
250#define MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM (0x00000000)
251#define MPI2_RAID_PGAD_FORM_CONFIGNUM (0x10000000)
252#define MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG (0x20000000)
253
254#define MPI2_RAID_PGAD_CONFIGNUM_MASK (0x000000FF)
255
256
257/* Driver Persistent Mapping PageAddress format */
258#define MPI2_DPM_PGAD_FORM_MASK (0xF0000000)
259#define MPI2_DPM_PGAD_FORM_ENTRY_RANGE (0x00000000)
260
261#define MPI2_DPM_PGAD_ENTRY_COUNT_MASK (0x0FFF0000)
262#define MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT (16)
263#define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF)
264
265
266/****************************************************************************
267* Configuration messages
268****************************************************************************/
269
270/* Configuration Request Message */
271typedef struct _MPI2_CONFIG_REQUEST
272{
273 U8 Action; /* 0x00 */
274 U8 SGLFlags; /* 0x01 */
275 U8 ChainOffset; /* 0x02 */
276 U8 Function; /* 0x03 */
277 U16 ExtPageLength; /* 0x04 */
278 U8 ExtPageType; /* 0x06 */
279 U8 MsgFlags; /* 0x07 */
280 U8 VP_ID; /* 0x08 */
281 U8 VF_ID; /* 0x09 */
282 U16 Reserved1; /* 0x0A */
283 U32 Reserved2; /* 0x0C */
284 U32 Reserved3; /* 0x10 */
285 MPI2_CONFIG_PAGE_HEADER Header; /* 0x14 */
286 U32 PageAddress; /* 0x18 */
287 MPI2_SGE_IO_UNION PageBufferSGE; /* 0x1C */
288} MPI2_CONFIG_REQUEST, MPI2_POINTER PTR_MPI2_CONFIG_REQUEST,
289 Mpi2ConfigRequest_t, MPI2_POINTER pMpi2ConfigRequest_t;
290
291/* values for the Action field */
292#define MPI2_CONFIG_ACTION_PAGE_HEADER (0x00)
293#define MPI2_CONFIG_ACTION_PAGE_READ_CURRENT (0x01)
294#define MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02)
295#define MPI2_CONFIG_ACTION_PAGE_DEFAULT (0x03)
296#define MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04)
297#define MPI2_CONFIG_ACTION_PAGE_READ_DEFAULT (0x05)
298#define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06)
299#define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07)
300
301/* values for SGLFlags field are in the SGL section of mpi2.h */
302
303
304/* Config Reply Message */
305typedef struct _MPI2_CONFIG_REPLY
306{
307 U8 Action; /* 0x00 */
308 U8 SGLFlags; /* 0x01 */
309 U8 MsgLength; /* 0x02 */
310 U8 Function; /* 0x03 */
311 U16 ExtPageLength; /* 0x04 */
312 U8 ExtPageType; /* 0x06 */
313 U8 MsgFlags; /* 0x07 */
314 U8 VP_ID; /* 0x08 */
315 U8 VF_ID; /* 0x09 */
316 U16 Reserved1; /* 0x0A */
317 U16 Reserved2; /* 0x0C */
318 U16 IOCStatus; /* 0x0E */
319 U32 IOCLogInfo; /* 0x10 */
320 MPI2_CONFIG_PAGE_HEADER Header; /* 0x14 */
321} MPI2_CONFIG_REPLY, MPI2_POINTER PTR_MPI2_CONFIG_REPLY,
322 Mpi2ConfigReply_t, MPI2_POINTER pMpi2ConfigReply_t;
323
324
325
326/*****************************************************************************
327*
328* C o n f i g u r a t i o n P a g e s
329*
330*****************************************************************************/
331
332/****************************************************************************
333* Manufacturing Config pages
334****************************************************************************/
335
336#define MPI2_MFGPAGE_VENDORID_LSI (0x1000)
337
338/* SAS */
339#define MPI2_MFGPAGE_DEVID_SAS2004 (0x0070)
340#define MPI2_MFGPAGE_DEVID_SAS2008 (0x0072)
341#define MPI2_MFGPAGE_DEVID_SAS2108_1 (0x0074)
342#define MPI2_MFGPAGE_DEVID_SAS2108_2 (0x0076)
343#define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077)
344#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064)
345#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065)
346
347
348/* Manufacturing Page 0 */
349
350typedef struct _MPI2_CONFIG_PAGE_MAN_0
351{
352 MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
353 U8 ChipName[16]; /* 0x04 */
354 U8 ChipRevision[8]; /* 0x14 */
355 U8 BoardName[16]; /* 0x1C */
356 U8 BoardAssembly[16]; /* 0x2C */
357 U8 BoardTracerNumber[16]; /* 0x3C */
358} MPI2_CONFIG_PAGE_MAN_0,
359 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_0,
360 Mpi2ManufacturingPage0_t, MPI2_POINTER pMpi2ManufacturingPage0_t;
361
362#define MPI2_MANUFACTURING0_PAGEVERSION (0x00)
363
364
365/* Manufacturing Page 1 */
366
367typedef struct _MPI2_CONFIG_PAGE_MAN_1
368{
369 MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
370 U8 VPD[256]; /* 0x04 */
371} MPI2_CONFIG_PAGE_MAN_1,
372 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_1,
373 Mpi2ManufacturingPage1_t, MPI2_POINTER pMpi2ManufacturingPage1_t;
374
375#define MPI2_MANUFACTURING1_PAGEVERSION (0x00)
376
377
378typedef struct _MPI2_CHIP_REVISION_ID
379{
380 U16 DeviceID; /* 0x00 */
381 U8 PCIRevisionID; /* 0x02 */
382 U8 Reserved; /* 0x03 */
383} MPI2_CHIP_REVISION_ID, MPI2_POINTER PTR_MPI2_CHIP_REVISION_ID,
384 Mpi2ChipRevisionId_t, MPI2_POINTER pMpi2ChipRevisionId_t;
385
386
387/* Manufacturing Page 2 */
388
389/*
390 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
391 * one and check Header.PageLength at runtime.
392 */
393#ifndef MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS
394#define MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS (1)
395#endif
396
397typedef struct _MPI2_CONFIG_PAGE_MAN_2
398{
399 MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
400 MPI2_CHIP_REVISION_ID ChipId; /* 0x04 */
401 U32 HwSettings[MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS];/* 0x08 */
402} MPI2_CONFIG_PAGE_MAN_2,
403 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_2,
404 Mpi2ManufacturingPage2_t, MPI2_POINTER pMpi2ManufacturingPage2_t;
405
406#define MPI2_MANUFACTURING2_PAGEVERSION (0x00)
407
408
409/* Manufacturing Page 3 */
410
411/*
412 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
413 * one and check Header.PageLength at runtime.
414 */
415#ifndef MPI2_MAN_PAGE_3_INFO_WORDS
416#define MPI2_MAN_PAGE_3_INFO_WORDS (1)
417#endif
418
419typedef struct _MPI2_CONFIG_PAGE_MAN_3
420{
421 MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
422 MPI2_CHIP_REVISION_ID ChipId; /* 0x04 */
423 U32 Info[MPI2_MAN_PAGE_3_INFO_WORDS];/* 0x08 */
424} MPI2_CONFIG_PAGE_MAN_3,
425 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_3,
426 Mpi2ManufacturingPage3_t, MPI2_POINTER pMpi2ManufacturingPage3_t;
427
428#define MPI2_MANUFACTURING3_PAGEVERSION (0x00)
429
430
431/* Manufacturing Page 4 */
432
433typedef struct _MPI2_MANPAGE4_PWR_SAVE_SETTINGS
434{
435 U8 PowerSaveFlags; /* 0x00 */
436 U8 InternalOperationsSleepTime; /* 0x01 */
437 U8 InternalOperationsRunTime; /* 0x02 */
438 U8 HostIdleTime; /* 0x03 */
439} MPI2_MANPAGE4_PWR_SAVE_SETTINGS,
440 MPI2_POINTER PTR_MPI2_MANPAGE4_PWR_SAVE_SETTINGS,
441 Mpi2ManPage4PwrSaveSettings_t, MPI2_POINTER pMpi2ManPage4PwrSaveSettings_t;
442
443/* defines for the PowerSaveFlags field */
444#define MPI2_MANPAGE4_MASK_POWERSAVE_MODE (0x03)
445#define MPI2_MANPAGE4_POWERSAVE_MODE_DISABLED (0x00)
446#define MPI2_MANPAGE4_CUSTOM_POWERSAVE_MODE (0x01)
447#define MPI2_MANPAGE4_FULL_POWERSAVE_MODE (0x02)
448
449typedef struct _MPI2_CONFIG_PAGE_MAN_4
450{
451 MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
452 U32 Reserved1; /* 0x04 */
453 U32 Flags; /* 0x08 */
454 U8 InquirySize; /* 0x0C */
455 U8 Reserved2; /* 0x0D */
456 U16 Reserved3; /* 0x0E */
457 U8 InquiryData[56]; /* 0x10 */
458 U32 RAID0VolumeSettings; /* 0x48 */
459 U32 RAID1EVolumeSettings; /* 0x4C */
460 U32 RAID1VolumeSettings; /* 0x50 */
461 U32 RAID10VolumeSettings; /* 0x54 */
462 U32 Reserved4; /* 0x58 */
463 U32 Reserved5; /* 0x5C */
464 MPI2_MANPAGE4_PWR_SAVE_SETTINGS PowerSaveSettings; /* 0x60 */
465 U8 MaxOCEDisks; /* 0x64 */
466 U8 ResyncRate; /* 0x65 */
467 U16 DataScrubDuration; /* 0x66 */
468 U8 MaxHotSpares; /* 0x68 */
469 U8 MaxPhysDisksPerVol; /* 0x69 */
470 U8 MaxPhysDisks; /* 0x6A */
471 U8 MaxVolumes; /* 0x6B */
472} MPI2_CONFIG_PAGE_MAN_4,
473 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_4,
474 Mpi2ManufacturingPage4_t, MPI2_POINTER pMpi2ManufacturingPage4_t;
475
476#define MPI2_MANUFACTURING4_PAGEVERSION (0x0A)
477
478/* Manufacturing Page 4 Flags field */
479#define MPI2_MANPAGE4_METADATA_SIZE_MASK (0x00030000)
480#define MPI2_MANPAGE4_METADATA_512MB (0x00000000)
481
482#define MPI2_MANPAGE4_MIX_SSD_SAS_SATA (0x00008000)
483#define MPI2_MANPAGE4_MIX_SSD_AND_NON_SSD (0x00004000)
484#define MPI2_MANPAGE4_HIDE_PHYSDISK_NON_IR (0x00002000)
485
486#define MPI2_MANPAGE4_MASK_PHYSDISK_COERCION (0x00001C00)
487#define MPI2_MANPAGE4_PHYSDISK_COERCION_1GB (0x00000000)
488#define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION (0x00000400)
489#define MPI2_MANPAGE4_PHYSDISK_ADAPTIVE_COERCION (0x00000800)
490#define MPI2_MANPAGE4_PHYSDISK_ZERO_COERCION (0x00000C00)
491
492#define MPI2_MANPAGE4_MASK_BAD_BLOCK_MARKING (0x00000300)
493#define MPI2_MANPAGE4_DEFAULT_BAD_BLOCK_MARKING (0x00000000)
494#define MPI2_MANPAGE4_TABLE_BAD_BLOCK_MARKING (0x00000100)
495#define MPI2_MANPAGE4_WRITE_LONG_BAD_BLOCK_MARKING (0x00000200)
496
497#define MPI2_MANPAGE4_FORCE_OFFLINE_FAILOVER (0x00000080)
498#define MPI2_MANPAGE4_RAID10_DISABLE (0x00000040)
499#define MPI2_MANPAGE4_RAID1E_DISABLE (0x00000020)
500#define MPI2_MANPAGE4_RAID1_DISABLE (0x00000010)
501#define MPI2_MANPAGE4_RAID0_DISABLE (0x00000008)
502#define MPI2_MANPAGE4_IR_MODEPAGE8_DISABLE (0x00000004)
503#define MPI2_MANPAGE4_IM_RESYNC_CACHE_ENABLE (0x00000002)
504#define MPI2_MANPAGE4_IR_NO_MIX_SAS_SATA (0x00000001)
505
506
/* Manufacturing Page 5: per-PHY WWID / device name assignments */

/*
 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
 * one and check Header.PageLength or NumPhys at runtime.
 */
#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES
#define MPI2_MAN_PAGE_5_PHY_ENTRIES         (1)
#endif

/* one entry per controller PHY */
typedef struct _MPI2_MANUFACTURING5_ENTRY
{
    U64     WWID;           /* 0x00 */
    U64     DeviceName;     /* 0x08 */
} MPI2_MANUFACTURING5_ENTRY, MPI2_POINTER PTR_MPI2_MANUFACTURING5_ENTRY,
  Mpi2Manufacturing5Entry_t, MPI2_POINTER pMpi2Manufacturing5Entry_t;

typedef struct _MPI2_CONFIG_PAGE_MAN_5
{
    MPI2_CONFIG_PAGE_HEADER     Header;     /* 0x00 */
    U8                          NumPhys;    /* 0x04 */
    U8                          Reserved1;  /* 0x05 */
    U16                         Reserved2;  /* 0x06 */
    U32                         Reserved3;  /* 0x08 */
    U32                         Reserved4;  /* 0x0C */
    /* offset corrected from a stale 0x08: Reserved4 ends at 0x0F */
    MPI2_MANUFACTURING5_ENTRY   Phy[MPI2_MAN_PAGE_5_PHY_ENTRIES];/* 0x10 */
} MPI2_CONFIG_PAGE_MAN_5,
  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_5,
  Mpi2ManufacturingPage5_t, MPI2_POINTER pMpi2ManufacturingPage5_t;

#define MPI2_MANUFACTURING5_PAGEVERSION     (0x03)
538
539
/* Manufacturing Page 6: a single product-specific word */

typedef struct _MPI2_CONFIG_PAGE_MAN_6
{
    MPI2_CONFIG_PAGE_HEADER     Header;             /* 0x00 */
    U32                         ProductSpecificInfo;/* 0x04 */
} MPI2_CONFIG_PAGE_MAN_6,
  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_6,
  Mpi2ManufacturingPage6_t, MPI2_POINTER pMpi2ManufacturingPage6_t;

#define MPI2_MANUFACTURING6_PAGEVERSION     (0x00)
551
552
/* Manufacturing Page 7: enclosure/connector description per PHY */

/* one entry per connector */
typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO
{
    U32     Pinout;         /* 0x00 */
    U8      Connector[16];  /* 0x04 */
    U8      Location;       /* 0x14 */
    U8      Reserved1;      /* 0x15 */
    U16     Slot;           /* 0x16 */
    U32     Reserved2;      /* 0x18 */
} MPI2_MANPAGE7_CONNECTOR_INFO, MPI2_POINTER PTR_MPI2_MANPAGE7_CONNECTOR_INFO,
  Mpi2ManPage7ConnectorInfo_t, MPI2_POINTER pMpi2ManPage7ConnectorInfo_t;

/* defines for the Pinout field (SFF connector type, one bit per lane) */
#define MPI2_MANPAGE7_PINOUT_SFF_8484_L4            (0x00080000)
#define MPI2_MANPAGE7_PINOUT_SFF_8484_L3            (0x00040000)
#define MPI2_MANPAGE7_PINOUT_SFF_8484_L2            (0x00020000)
#define MPI2_MANPAGE7_PINOUT_SFF_8484_L1            (0x00010000)
#define MPI2_MANPAGE7_PINOUT_SFF_8470_L4            (0x00000800)
#define MPI2_MANPAGE7_PINOUT_SFF_8470_L3            (0x00000400)
#define MPI2_MANPAGE7_PINOUT_SFF_8470_L2            (0x00000200)
#define MPI2_MANPAGE7_PINOUT_SFF_8470_L1            (0x00000100)
#define MPI2_MANPAGE7_PINOUT_SFF_8482               (0x00000002)
#define MPI2_MANPAGE7_PINOUT_CONNECTION_UNKNOWN     (0x00000001)

/* defines for the Location field */
#define MPI2_MANPAGE7_LOCATION_UNKNOWN              (0x01)
#define MPI2_MANPAGE7_LOCATION_INTERNAL             (0x02)
#define MPI2_MANPAGE7_LOCATION_EXTERNAL             (0x04)
#define MPI2_MANPAGE7_LOCATION_SWITCHABLE           (0x08)
#define MPI2_MANPAGE7_LOCATION_AUTO                 (0x10)
#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT          (0x20)
#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED        (0x80)

/*
 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
 * one and check NumPhys at runtime.
 */
#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX
#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX    (1)
#endif

typedef struct _MPI2_CONFIG_PAGE_MAN_7
{
    MPI2_CONFIG_PAGE_HEADER Header;             /* 0x00 */
    U32                     Reserved1;          /* 0x04 */
    U32                     Reserved2;          /* 0x08 */
    U32                     Flags;              /* 0x0C */
    U8                      EnclosureName[16];  /* 0x10 */
    U8                      NumPhys;            /* 0x20 */
    U8                      Reserved3;          /* 0x21 */
    U16                     Reserved4;          /* 0x22 */
    MPI2_MANPAGE7_CONNECTOR_INFO ConnectorInfo[MPI2_MANPAGE7_CONNECTOR_INFO_MAX]; /* 0x24 */
} MPI2_CONFIG_PAGE_MAN_7,
  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_7,
  Mpi2ManufacturingPage7_t, MPI2_POINTER pMpi2ManufacturingPage7_t;

#define MPI2_MANUFACTURING7_PAGEVERSION             (0x00)

/* defines for the Flags field */
#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO            (0x00000001)
614
615
/*
 * Generic structure to use for product-specific manufacturing pages
 * (currently Manufacturing Page 8 through Manufacturing Page 31).
 * Only the first product-specific word is declared; actual length comes
 * from Header.PageLength.
 */

typedef struct _MPI2_CONFIG_PAGE_MAN_PS
{
    MPI2_CONFIG_PAGE_HEADER     Header;             /* 0x00 */
    U32                         ProductSpecificInfo;/* 0x04 */
} MPI2_CONFIG_PAGE_MAN_PS,
  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_PS,
  Mpi2ManufacturingPagePS_t, MPI2_POINTER pMpi2ManufacturingPagePS_t;

/* page version for each of the product-specific pages 8..31 */
#define MPI2_MANUFACTURING8_PAGEVERSION     (0x00)
#define MPI2_MANUFACTURING9_PAGEVERSION     (0x00)
#define MPI2_MANUFACTURING10_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING11_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING12_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING13_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING14_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING15_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING16_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING17_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING18_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING19_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING20_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING21_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING22_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING23_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING24_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING25_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING26_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING27_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING28_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING29_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING30_PAGEVERSION    (0x00)
#define MPI2_MANUFACTURING31_PAGEVERSION    (0x00)
653
654
/****************************************************************************
* IO Unit Config Pages
****************************************************************************/

/* IO Unit Page 0 */

typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_0
{
    MPI2_CONFIG_PAGE_HEADER Header;                  /* 0x00 */
    U64                     UniqueValue;             /* 0x04 */
    /*
     * NOTE(review): the two offsets below look inconsistent — the U64 at
     * 0x04 ends at 0x0B, which would put these fields at 0x0C/0x0E.
     * MPI2_VERSION_UNION is defined elsewhere; confirm against its size
     * and the MPI 2.0 spec before relying on these comments.
     */
    MPI2_VERSION_UNION      NvdataVersionDefault;    /* 0x08 */
    MPI2_VERSION_UNION      NvdataVersionPersistent; /* 0x0A */
} MPI2_CONFIG_PAGE_IO_UNIT_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_0,
  Mpi2IOUnitPage0_t, MPI2_POINTER pMpi2IOUnitPage0_t;

#define MPI2_IOUNITPAGE0_PAGEVERSION    (0x02)
671
672
/* IO Unit Page 1: global IO unit behavior flags */

typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
{
    MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
    U32                     Flags;  /* 0x04 */
} MPI2_CONFIG_PAGE_IO_UNIT_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_1,
  Mpi2IOUnitPage1_t, MPI2_POINTER pMpi2IOUnitPage1_t;

#define MPI2_IOUNITPAGE1_PAGEVERSION                    (0x04)

/* IO Unit Page 1 Flags defines */
#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE          (0x00000600)
#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE        (0x00000000)
#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE       (0x00000200)
#define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE     (0x00000400)
#define MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE       (0x00000100)
#define MPI2_IOUNITPAGE1_DISABLE_IR                     (0x00000040)
#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020)
#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID        (0x00000004)
#define MPI2_IOUNITPAGE1_MULTI_PATHING                  (0x00000002)
#define MPI2_IOUNITPAGE1_SINGLE_PATHING                 (0x00000000)
695
696
/* IO Unit Page 3: GPIO pin states */

/*
 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
 * one and check Header.PageLength at runtime.
 */
#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX
#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX    (1)
#endif

typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3
{
    MPI2_CONFIG_PAGE_HEADER Header;     /* 0x00 */
    U8                      GPIOCount;  /* 0x04 */
    U8                      Reserved1;  /* 0x05 */
    U16                     Reserved2;  /* 0x06 */
    U16                     GPIOVal[MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX];/* 0x08 */
} MPI2_CONFIG_PAGE_IO_UNIT_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_3,
  Mpi2IOUnitPage3_t, MPI2_POINTER pMpi2IOUnitPage3_t;

#define MPI2_IOUNITPAGE3_PAGEVERSION                (0x01)

/* defines for IO Unit Page 3 GPIOVal field: function in the upper 14 bits,
 * on/off setting in bit 0 */
#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_MASK         (0xFFFC)
#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_SHIFT        (2)
#define MPI2_IOUNITPAGE3_GPIO_SETTING_OFF           (0x0000)
#define MPI2_IOUNITPAGE3_GPIO_SETTING_ON            (0x0001)
724
725
/****************************************************************************
* IOC Config Pages
****************************************************************************/

/* IOC Page 0: PCI identity of the IOC */

typedef struct _MPI2_CONFIG_PAGE_IOC_0
{
    MPI2_CONFIG_PAGE_HEADER Header;             /* 0x00 */
    U32                     Reserved1;          /* 0x04 */
    U32                     Reserved2;          /* 0x08 */
    U16                     VendorID;           /* 0x0C */
    U16                     DeviceID;           /* 0x0E */
    U8                      RevisionID;         /* 0x10 */
    U8                      Reserved3;          /* 0x11 */
    U16                     Reserved4;          /* 0x12 */
    U32                     ClassCode;          /* 0x14 */
    U16                     SubsystemVendorID;  /* 0x18 */
    U16                     SubsystemID;        /* 0x1A */
} MPI2_CONFIG_PAGE_IOC_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_0,
  Mpi2IOCPage0_t, MPI2_POINTER pMpi2IOCPage0_t;

#define MPI2_IOCPAGE0_PAGEVERSION   (0x02)
749
750
/* IOC Page 1: reply coalescing controls and PCI location */

typedef struct _MPI2_CONFIG_PAGE_IOC_1
{
    MPI2_CONFIG_PAGE_HEADER Header;             /* 0x00 */
    U32                     Flags;              /* 0x04 */
    U32                     CoalescingTimeout;  /* 0x08 */
    U8                      CoalescingDepth;    /* 0x0C */
    U8                      PCISlotNum;         /* 0x0D */
    U8                      PCIBusNum;          /* 0x0E */
    U8                      PCIDomainSegment;   /* 0x0F */
    U32                     Reserved1;          /* 0x10 */
    U32                     Reserved2;          /* 0x14 */
} MPI2_CONFIG_PAGE_IOC_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_1,
  Mpi2IOCPage1_t, MPI2_POINTER pMpi2IOCPage1_t;

#define MPI2_IOCPAGE1_PAGEVERSION           (0x05)

/* defines for IOC Page 1 Flags field */
#define MPI2_IOCPAGE1_REPLY_COALESCING      (0x00000001)

/* sentinel values for the PCI location fields */
#define MPI2_IOCPAGE1_PCISLOTNUM_UNKNOWN    (0xFF)
#define MPI2_IOCPAGE1_PCIBUSNUM_UNKNOWN     (0xFF)
#define MPI2_IOCPAGE1_PCIDOMAIN_UNKNOWN     (0xFF)
775
/* IOC Page 6: Integrated RAID capabilities and limits */

typedef struct _MPI2_CONFIG_PAGE_IOC_6
{
    MPI2_CONFIG_PAGE_HEADER Header;                     /* 0x00 */
    U32                     CapabilitiesFlags;          /* 0x04 */
    U8                      MaxDrivesRAID0;             /* 0x08 */
    U8                      MaxDrivesRAID1;             /* 0x09 */
    U8                      MaxDrivesRAID1E;            /* 0x0A */
    U8                      MaxDrivesRAID10;            /* 0x0B */
    U8                      MinDrivesRAID0;             /* 0x0C */
    U8                      MinDrivesRAID1;             /* 0x0D */
    U8                      MinDrivesRAID1E;            /* 0x0E */
    U8                      MinDrivesRAID10;            /* 0x0F */
    U32                     Reserved1;                  /* 0x10 */
    U8                      MaxGlobalHotSpares;         /* 0x14 */
    U8                      MaxPhysDisks;               /* 0x15 */
    U8                      MaxVolumes;                 /* 0x16 */
    U8                      MaxConfigs;                 /* 0x17 */
    U8                      MaxOCEDisks;                /* 0x18 */
    U8                      Reserved2;                  /* 0x19 */
    U16                     Reserved3;                  /* 0x1A */
    U32                     SupportedStripeSizeMapRAID0; /* 0x1C */
    U32                     SupportedStripeSizeMapRAID1E; /* 0x20 */
    U32                     SupportedStripeSizeMapRAID10; /* 0x24 */
    U32                     Reserved4;                  /* 0x28 */
    U32                     Reserved5;                  /* 0x2C */
    U16                     DefaultMetadataSize;        /* 0x30 */
    U16                     Reserved6;                  /* 0x32 */
    U16                     MaxBadBlockTableEntries;    /* 0x34 */
    U16                     Reserved7;                  /* 0x36 */
    U32                     IRNvsramVersion;            /* 0x38 */
} MPI2_CONFIG_PAGE_IOC_6, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_6,
  Mpi2IOCPage6_t, MPI2_POINTER pMpi2IOCPage6_t;

#define MPI2_IOCPAGE6_PAGEVERSION                   (0x04)

/* defines for IOC Page 6 CapabilitiesFlags */
#define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT      (0x00000010)
#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT       (0x00000008)
#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT      (0x00000004)
#define MPI2_IOCPAGE6_CAP_FLAGS_RAID0_SUPPORT       (0x00000002)
#define MPI2_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE    (0x00000001)
819
820
/* IOC Page 7: event and broadcast-primitive notification masks */

#define MPI2_IOCPAGE7_EVENTMASK_WORDS       (4)

typedef struct _MPI2_CONFIG_PAGE_IOC_7
{
    MPI2_CONFIG_PAGE_HEADER Header;                     /* 0x00 */
    U32                     Reserved1;                  /* 0x04 */
    U32                     EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/* 0x08 */
    U16                     SASBroadcastPrimitiveMasks; /* 0x18 */
    U16                     Reserved2;                  /* 0x1A */
    U32                     Reserved3;                  /* 0x1C */
} MPI2_CONFIG_PAGE_IOC_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_7,
  Mpi2IOCPage7_t, MPI2_POINTER pMpi2IOCPage7_t;

#define MPI2_IOCPAGE7_PAGEVERSION           (0x01)
837
838
/* IOC Page 8: device and volume mapping behavior */

typedef struct _MPI2_CONFIG_PAGE_IOC_8
{
    MPI2_CONFIG_PAGE_HEADER Header;                 /* 0x00 */
    U8                      NumDevsPerEnclosure;    /* 0x04 */
    U8                      Reserved1;              /* 0x05 */
    U16                     Reserved2;              /* 0x06 */
    U16                     MaxPersistentEntries;   /* 0x08 */
    U16                     MaxNumPhysicalMappedIDs;/* 0x0A */
    U16                     Flags;                  /* 0x0C */
    U16                     Reserved3;              /* 0x0E */
    U16                     IRVolumeMappingFlags;   /* 0x10 */
    U16                     Reserved4;              /* 0x12 */
    U32                     Reserved5;              /* 0x14 */
} MPI2_CONFIG_PAGE_IOC_8, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_8,
  Mpi2IOCPage8_t, MPI2_POINTER pMpi2IOCPage8_t;

#define MPI2_IOCPAGE8_PAGEVERSION                       (0x00)

/* defines for IOC Page 8 Flags field */
#define MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1             (0x00000020)
#define MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0         (0x00000010)

/* target-device mapping mode */
#define MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE           (0x0000000E)
#define MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING  (0x00000000)
#define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING      (0x00000002)

#define MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING  (0x00000001)
#define MPI2_IOCPAGE8_FLAGS_ENABLE_PERSISTENT_MAPPING   (0x00000000)

/* defines for IOC Page 8 IRVolumeMappingFlags */
#define MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE  (0x00000003)
#define MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING        (0x00000000)
#define MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING       (0x00000001)
874
875
/****************************************************************************
* BIOS Config Pages
****************************************************************************/

/* BIOS Page 1: BIOS boot behavior and device-scan settings */

typedef struct _MPI2_CONFIG_PAGE_BIOS_1
{
    MPI2_CONFIG_PAGE_HEADER Header;                     /* 0x00 */
    U32                     BiosOptions;                /* 0x04 */
    U32                     IOCSettings;                /* 0x08 */
    U32                     Reserved1;                  /* 0x0C */
    U32                     DeviceSettings;             /* 0x10 */
    U16                     NumberOfDevices;            /* 0x14 */
    U16                     Reserved2;                  /* 0x16 */
    U16                     IOTimeoutBlockDevicesNonRM; /* 0x18 */
    U16                     IOTimeoutSequential;        /* 0x1A */
    U16                     IOTimeoutOther;             /* 0x1C */
    U16                     IOTimeoutBlockDevicesRM;    /* 0x1E */
} MPI2_CONFIG_PAGE_BIOS_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_1,
  Mpi2BiosPage1_t, MPI2_POINTER pMpi2BiosPage1_t;

#define MPI2_BIOSPAGE1_PAGEVERSION                  (0x04)

/* values for BIOS Page 1 BiosOptions field */
#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS         (0x00000001)

/* values for BIOS Page 1 IOCSettings field */
#define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE  (0x00030000)
#define MPI2_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT   (0x00000000)
#define MPI2_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT      (0x00010000)

/* removable-media handling */
#define MPI2_BIOSPAGE1_IOCSET_MASK_RM_SETTING       (0x000000C0)
#define MPI2_BIOSPAGE1_IOCSET_NONE_RM_SETTING       (0x00000000)
#define MPI2_BIOSPAGE1_IOCSET_BOOT_RM_SETTING       (0x00000040)
#define MPI2_BIOSPAGE1_IOCSET_MEDIA_RM_SETTING      (0x00000080)

/* adapter support level */
#define MPI2_BIOSPAGE1_IOCSET_MASK_ADAPTER_SUPPORT  (0x00000030)
#define MPI2_BIOSPAGE1_IOCSET_NO_SUPPORT            (0x00000000)
#define MPI2_BIOSPAGE1_IOCSET_BIOS_SUPPORT          (0x00000010)
#define MPI2_BIOSPAGE1_IOCSET_OS_SUPPORT            (0x00000020)
#define MPI2_BIOSPAGE1_IOCSET_ALL_SUPPORT           (0x00000030)

#define MPI2_BIOSPAGE1_IOCSET_ALTERNATE_CHS         (0x00000008)

/* values for BIOS Page 1 DeviceSettings field */
#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010)
#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN       (0x00000008)
#define MPI2_BIOSPAGE1_DEVSET_DISABLE_RM_LUN        (0x00000004)
#define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN    (0x00000002)
#define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN     (0x00000001)
927
928
/* BIOS Page 2: requested and current boot device */

/* boot device selected purely by adapter scan order */
typedef struct _MPI2_BOOT_DEVICE_ADAPTER_ORDER
{
    U32     Reserved1;  /* 0x00 */
    U32     Reserved2;  /* 0x04 */
    U32     Reserved3;  /* 0x08 */
    U32     Reserved4;  /* 0x0C */
    U32     Reserved5;  /* 0x10 */
    U32     Reserved6;  /* 0x14 */
} MPI2_BOOT_DEVICE_ADAPTER_ORDER,
  MPI2_POINTER PTR_MPI2_BOOT_DEVICE_ADAPTER_ORDER,
  Mpi2BootDeviceAdapterOrder_t, MPI2_POINTER pMpi2BootDeviceAdapterOrder_t;

/* boot device identified by SAS address + LUN */
typedef struct _MPI2_BOOT_DEVICE_SAS_WWID
{
    U64     SASAddress; /* 0x00 */
    U8      LUN[8];     /* 0x08 */
    U32     Reserved1;  /* 0x10 */
    U32     Reserved2;  /* 0x14 */
} MPI2_BOOT_DEVICE_SAS_WWID, MPI2_POINTER PTR_MPI2_BOOT_DEVICE_SAS_WWID,
  Mpi2BootDeviceSasWwid_t, MPI2_POINTER pMpi2BootDeviceSasWwid_t;

/* boot device identified by enclosure logical ID + slot */
typedef struct _MPI2_BOOT_DEVICE_ENCLOSURE_SLOT
{
    U64     EnclosureLogicalID; /* 0x00 */
    U32     Reserved1;          /* 0x08 */
    U32     Reserved2;          /* 0x0C */
    U16     SlotNumber;         /* 0x10 */
    U16     Reserved3;          /* 0x12 */
    U32     Reserved4;          /* 0x14 */
} MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
  MPI2_POINTER PTR_MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
  Mpi2BootDeviceEnclosureSlot_t, MPI2_POINTER pMpi2BootDeviceEnclosureSlot_t;

/* boot device identified by device name + LUN */
typedef struct _MPI2_BOOT_DEVICE_DEVICE_NAME
{
    U64     DeviceName; /* 0x00 */
    U8      LUN[8];     /* 0x08 */
    U32     Reserved1;  /* 0x10 */
    U32     Reserved2;  /* 0x14 */
} MPI2_BOOT_DEVICE_DEVICE_NAME, MPI2_POINTER PTR_MPI2_BOOT_DEVICE_DEVICE_NAME,
  Mpi2BootDeviceDeviceName_t, MPI2_POINTER pMpi2BootDeviceDeviceName_t;

/*
 * Union of all boot-device forms; which member is valid is selected by the
 * corresponding *BootDeviceForm field (MPI2_BIOSPAGE2_FORM_* values).
 * NOTE(review): the struct tag has a doubled "MPI2_MPI2_" prefix; it is
 * kept as-is because the tag is part of the published interface.
 */
typedef union _MPI2_MPI2_BIOSPAGE2_BOOT_DEVICE
{
    MPI2_BOOT_DEVICE_ADAPTER_ORDER  AdapterOrder;
    MPI2_BOOT_DEVICE_SAS_WWID       SasWwid;
    MPI2_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot;
    MPI2_BOOT_DEVICE_DEVICE_NAME    DeviceName;
} MPI2_BIOSPAGE2_BOOT_DEVICE, MPI2_POINTER PTR_MPI2_BIOSPAGE2_BOOT_DEVICE,
  Mpi2BiosPage2BootDevice_t, MPI2_POINTER pMpi2BiosPage2BootDevice_t;

typedef struct _MPI2_CONFIG_PAGE_BIOS_2
{
    MPI2_CONFIG_PAGE_HEADER     Header;                 /* 0x00 */
    U32                         Reserved1;              /* 0x04 */
    U32                         Reserved2;              /* 0x08 */
    U32                         Reserved3;              /* 0x0C */
    U32                         Reserved4;              /* 0x10 */
    U32                         Reserved5;              /* 0x14 */
    U32                         Reserved6;              /* 0x18 */
    U8                          ReqBootDeviceForm;      /* 0x1C */
    U8                          Reserved7;              /* 0x1D */
    U16                         Reserved8;              /* 0x1E */
    MPI2_BIOSPAGE2_BOOT_DEVICE  RequestedBootDevice;    /* 0x20 */
    U8                          ReqAltBootDeviceForm;   /* 0x38 */
    U8                          Reserved9;              /* 0x39 */
    U16                         Reserved10;             /* 0x3A */
    MPI2_BIOSPAGE2_BOOT_DEVICE  RequestedAltBootDevice; /* 0x3C */
    U8                          CurrentBootDeviceForm;  /* 0x58 */
    U8                          Reserved11;             /* 0x59 */
    U16                         Reserved12;             /* 0x5A */
    /* offset corrected from a stale 0x58 (which is CurrentBootDeviceForm):
     * Reserved12 ends at 0x5B, so the union starts at 0x5C */
    MPI2_BIOSPAGE2_BOOT_DEVICE  CurrentBootDevice;      /* 0x5C */
} MPI2_CONFIG_PAGE_BIOS_2, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_2,
  Mpi2BiosPage2_t, MPI2_POINTER pMpi2BiosPage2_t;

#define MPI2_BIOSPAGE2_PAGEVERSION                  (0x04)

/* values for BIOS Page 2 BootDeviceForm fields */
#define MPI2_BIOSPAGE2_FORM_MASK                    (0x0F)
#define MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED     (0x00)
#define MPI2_BIOSPAGE2_FORM_SAS_WWID                (0x05)
#define MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT          (0x06)
#define MPI2_BIOSPAGE2_FORM_DEVICE_NAME             (0x07)
1014
1015
/* BIOS Page 3: adapter boot order and BIOS display options */

typedef struct _MPI2_ADAPTER_INFO
{
    U8      PciBusNumber;               /* 0x00 */
    U8      PciDeviceAndFunctionNumber; /* 0x01 */
    U16     AdapterFlags;               /* 0x02 */
} MPI2_ADAPTER_INFO, MPI2_POINTER PTR_MPI2_ADAPTER_INFO,
  Mpi2AdapterInfo_t, MPI2_POINTER pMpi2AdapterInfo_t;

/* AdapterFlags defines */
#define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED            (0x0001)
#define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS         (0x0002)

typedef struct _MPI2_CONFIG_PAGE_BIOS_3
{
    MPI2_CONFIG_PAGE_HEADER Header;             /* 0x00 */
    U32                     GlobalFlags;        /* 0x04 */
    U32                     BiosVersion;        /* 0x08 */
    MPI2_ADAPTER_INFO       AdapterOrder[4];    /* 0x0C */
    U32                     Reserved1;          /* 0x1C */
} MPI2_CONFIG_PAGE_BIOS_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_3,
  Mpi2BiosPage3_t, MPI2_POINTER pMpi2BiosPage3_t;

#define MPI2_BIOSPAGE3_PAGEVERSION                  (0x00)

/* values for BIOS Page 3 GlobalFlags */
#define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR         (0x00000002)
#define MPI2_BIOSPAGE3_FLAGS_VERBOSE_ENABLE         (0x00000004)
#define MPI2_BIOSPAGE3_FLAGS_HOOK_INT_40_DISABLE    (0x00000010)

/* device-list display mode */
#define MPI2_BIOSPAGE3_FLAGS_DEV_LIST_DISPLAY_MASK  (0x000000E0)
#define MPI2_BIOSPAGE3_FLAGS_INSTALLED_DEV_DISPLAY  (0x00000000)
#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DISPLAY        (0x00000020)
#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DEV_DISPLAY    (0x00000040)
1050
1051
/* BIOS Page 4: per-PHY WWID/device-name reassignments */

/*
 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
 * one and check Header.PageLength or NumPhys at runtime.
 */
#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES
#define MPI2_BIOS_PAGE_4_PHY_ENTRIES        (1)
#endif

typedef struct _MPI2_BIOS4_ENTRY
{
    U64     ReassignmentWWID;       /* 0x00 */
    U64     ReassignmentDeviceName; /* 0x08 */
} MPI2_BIOS4_ENTRY, MPI2_POINTER PTR_MPI2_BIOS4_ENTRY,
  /* NOTE(review): stray 'M' in "Mpi2MBios4Entry_t" is a typo in the
   * published name; kept for source compatibility with existing users */
  Mpi2MBios4Entry_t, MPI2_POINTER pMpi2Bios4Entry_t;

typedef struct _MPI2_CONFIG_PAGE_BIOS_4
{
    MPI2_CONFIG_PAGE_HEADER Header;     /* 0x00 */
    U8                      NumPhys;    /* 0x04 */
    U8                      Reserved1;  /* 0x05 */
    U16                     Reserved2;  /* 0x06 */
    MPI2_BIOS4_ENTRY        Phy[MPI2_BIOS_PAGE_4_PHY_ENTRIES]; /* 0x08 */
} MPI2_CONFIG_PAGE_BIOS_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_4,
  Mpi2BiosPage4_t, MPI2_POINTER pMpi2BiosPage4_t;

#define MPI2_BIOSPAGE4_PAGEVERSION      (0x01)
1080
1081
/****************************************************************************
* RAID Volume Config Pages
****************************************************************************/

/* RAID Volume Page 0 */

/* per-member physical disk descriptor */
typedef struct _MPI2_RAIDVOL0_PHYS_DISK
{
    U8      RAIDSetNum;     /* 0x00 */
    U8      PhysDiskMap;    /* 0x01 */
    U8      PhysDiskNum;    /* 0x02 */
    U8      Reserved;       /* 0x03 */
} MPI2_RAIDVOL0_PHYS_DISK, MPI2_POINTER PTR_MPI2_RAIDVOL0_PHYS_DISK,
  Mpi2RaidVol0PhysDisk_t, MPI2_POINTER pMpi2RaidVol0PhysDisk_t;

/* defines for the PhysDiskMap field */
#define MPI2_RAIDVOL0_PHYSDISK_PRIMARY      (0x01)
#define MPI2_RAIDVOL0_PHYSDISK_SECONDARY    (0x02)

typedef struct _MPI2_RAIDVOL0_SETTINGS
{
    U16     Settings;       /* 0x00 */
    /* offsets corrected from stale 0x01/0x02: Settings is a U16
     * occupying 0x00-0x01 */
    U8      HotSparePool;   /* 0x02 */
    U8      Reserved;       /* 0x03 */
} MPI2_RAIDVOL0_SETTINGS, MPI2_POINTER PTR_MPI2_RAIDVOL0_SETTINGS,
  Mpi2RaidVol0Settings_t, MPI2_POINTER pMpi2RaidVol0Settings_t;

/* RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */
#define MPI2_RAID_HOT_SPARE_POOL_0          (0x01)
#define MPI2_RAID_HOT_SPARE_POOL_1          (0x02)
#define MPI2_RAID_HOT_SPARE_POOL_2          (0x04)
#define MPI2_RAID_HOT_SPARE_POOL_3          (0x08)
#define MPI2_RAID_HOT_SPARE_POOL_4          (0x10)
#define MPI2_RAID_HOT_SPARE_POOL_5          (0x20)
#define MPI2_RAID_HOT_SPARE_POOL_6          (0x40)
#define MPI2_RAID_HOT_SPARE_POOL_7          (0x80)

/* RAID Volume Page 0 VolumeSettings defines */
#define MPI2_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX     (0x0008)
#define MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE (0x0004)

/* write-cache policy */
#define MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING        (0x0003)
#define MPI2_RAIDVOL0_SETTING_UNCHANGED                 (0x0000)
#define MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING     (0x0001)
#define MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING      (0x0002)
1127
/*
 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
 * one and check Header.PageLength at runtime.
 */
#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX
#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX       (1)
#endif

/* RAID Volume Page 0: volume state, geometry, and member list */
typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0
{
    MPI2_CONFIG_PAGE_HEADER Header;             /* 0x00 */
    U16                     DevHandle;          /* 0x04 */
    U8                      VolumeState;        /* 0x06 */
    U8                      VolumeType;         /* 0x07 */
    U32                     VolumeStatusFlags;  /* 0x08 */
    MPI2_RAIDVOL0_SETTINGS  VolumeSettings;     /* 0x0C */
    U64                     MaxLBA;             /* 0x10 */
    U32                     StripeSize;         /* 0x18 */
    U16                     BlockSize;          /* 0x1C */
    U16                     Reserved1;          /* 0x1E */
    U8                      SupportedPhysDisks; /* 0x20 */
    U8                      ResyncRate;         /* 0x21 */
    U16                     DataScrubDuration;  /* 0x22 */
    U8                      NumPhysDisks;       /* 0x24 */
    U8                      Reserved2;          /* 0x25 */
    U8                      Reserved3;          /* 0x26 */
    U8                      InactiveStatus;     /* 0x27 */
    MPI2_RAIDVOL0_PHYS_DISK PhysDisk[MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX]; /* 0x28 */
} MPI2_CONFIG_PAGE_RAID_VOL_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_VOL_0,
  Mpi2RaidVolPage0_t, MPI2_POINTER pMpi2RaidVolPage0_t;

#define MPI2_RAIDVOLPAGE0_PAGEVERSION           (0x0A)

/* values for RAID VolumeState */
#define MPI2_RAID_VOL_STATE_MISSING             (0x00)
#define MPI2_RAID_VOL_STATE_FAILED              (0x01)
#define MPI2_RAID_VOL_STATE_INITIALIZING        (0x02)
#define MPI2_RAID_VOL_STATE_ONLINE              (0x03)
#define MPI2_RAID_VOL_STATE_DEGRADED            (0x04)
#define MPI2_RAID_VOL_STATE_OPTIMAL             (0x05)

/* values for RAID VolumeType */
#define MPI2_RAID_VOL_TYPE_RAID0                (0x00)
#define MPI2_RAID_VOL_TYPE_RAID1E               (0x01)
#define MPI2_RAID_VOL_TYPE_RAID1                (0x02)
#define MPI2_RAID_VOL_TYPE_RAID10               (0x05)
#define MPI2_RAID_VOL_TYPE_UNKNOWN              (0xFF)

/* values for RAID Volume Page 0 VolumeStatusFlags field */
#define MPI2_RAIDVOL0_STATUS_FLAG_PENDING_RESYNC            (0x02000000)
#define MPI2_RAIDVOL0_STATUS_FLAG_BACKG_INIT_PENDING        (0x01000000)
#define MPI2_RAIDVOL0_STATUS_FLAG_MDC_PENDING               (0x00800000)
#define MPI2_RAIDVOL0_STATUS_FLAG_USER_CONSIST_PENDING      (0x00400000)
#define MPI2_RAIDVOL0_STATUS_FLAG_MAKE_DATA_CONSISTENT      (0x00200000)
#define MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB                (0x00100000)
#define MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK         (0x00080000)
#define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION        (0x00040000)
#define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT           (0x00020000)
#define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS        (0x00010000)
#define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED               (0x00000040)
#define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE              (0x00000020)
#define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR          (0x00000000)
#define MPI2_RAIDVOL0_STATUS_FLAG_1E_ADJACENT_MIRROR        (0x00000010)
#define MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL      (0x00000008)
#define MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE           (0x00000004)
#define MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED                  (0x00000002)
#define MPI2_RAIDVOL0_STATUS_FLAG_ENABLED                   (0x00000001)

/* values for RAID Volume Page 0 SupportedPhysDisks field */
#define MPI2_RAIDVOL0_SUPPORT_SOLID_STATE_DISKS     (0x08)
#define MPI2_RAIDVOL0_SUPPORT_HARD_DISKS            (0x04)
#define MPI2_RAIDVOL0_SUPPORT_SAS_PROTOCOL          (0x02)
#define MPI2_RAIDVOL0_SUPPORT_SATA_PROTOCOL         (0x01)

/* values for RAID Volume Page 0 InactiveStatus field */
#define MPI2_RAIDVOLPAGE0_UNKNOWN_INACTIVE                  (0x00)
#define MPI2_RAIDVOLPAGE0_STALE_METADATA_INACTIVE           (0x01)
#define MPI2_RAIDVOLPAGE0_FOREIGN_VOLUME_INACTIVE           (0x02)
#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_RESOURCE_INACTIVE    (0x03)
#define MPI2_RAIDVOLPAGE0_CLONE_VOLUME_INACTIVE             (0x04)
#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_METADATA_INACTIVE    (0x05)
#define MPI2_RAIDVOLPAGE0_PREVIOUSLY_DELETED                (0x06)
1210
1211
/* RAID Volume Page 1: volume identity (GUID, name, WWID) */

typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_1
{
    MPI2_CONFIG_PAGE_HEADER Header;     /* 0x00 */
    U16                     DevHandle;  /* 0x04 */
    U16                     Reserved0;  /* 0x06 */
    U8                      GUID[24];   /* 0x08 */
    U8                      Name[16];   /* 0x20 */
    U64                     WWID;       /* 0x30 */
    U32                     Reserved1;  /* 0x38 */
    U32                     Reserved2;  /* 0x3C */
} MPI2_CONFIG_PAGE_RAID_VOL_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_VOL_1,
  Mpi2RaidVolPage1_t, MPI2_POINTER pMpi2RaidVolPage1_t;

#define MPI2_RAIDVOLPAGE1_PAGEVERSION   (0x03)
1228
1229
/****************************************************************************
* RAID Physical Disk Config Pages
****************************************************************************/

/* RAID Physical Disk Page 0: member disk state, identity and capacity */

typedef struct _MPI2_RAIDPHYSDISK0_SETTINGS
{
    U16     Reserved1;      /* 0x00 */
    U8      HotSparePool;   /* 0x02 */
    U8      Reserved2;      /* 0x03 */
} MPI2_RAIDPHYSDISK0_SETTINGS, MPI2_POINTER PTR_MPI2_RAIDPHYSDISK0_SETTINGS,
  Mpi2RaidPhysDisk0Settings_t, MPI2_POINTER pMpi2RaidPhysDisk0Settings_t;

/* use MPI2_RAID_HOT_SPARE_POOL_ defines for the HotSparePool field */

/* cached SCSI INQUIRY identity of the disk */
typedef struct _MPI2_RAIDPHYSDISK0_INQUIRY_DATA
{
    U8      VendorID[8];        /* 0x00 */
    U8      ProductID[16];      /* 0x08 */
    U8      ProductRevLevel[4]; /* 0x18 */
    U8      SerialNum[32];      /* 0x1C */
} MPI2_RAIDPHYSDISK0_INQUIRY_DATA,
  MPI2_POINTER PTR_MPI2_RAIDPHYSDISK0_INQUIRY_DATA,
  Mpi2RaidPhysDisk0InquiryData_t, MPI2_POINTER pMpi2RaidPhysDisk0InquiryData_t;

typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0
{
    MPI2_CONFIG_PAGE_HEADER         Header;             /* 0x00 */
    U16                             DevHandle;          /* 0x04 */
    U8                              Reserved1;          /* 0x06 */
    U8                              PhysDiskNum;        /* 0x07 */
    MPI2_RAIDPHYSDISK0_SETTINGS     PhysDiskSettings;   /* 0x08 */
    U32                             Reserved2;          /* 0x0C */
    MPI2_RAIDPHYSDISK0_INQUIRY_DATA InquiryData;        /* 0x10 */
    U32                             Reserved3;          /* 0x4C */
    U8                              PhysDiskState;      /* 0x50 */
    U8                              OfflineReason;      /* 0x51 */
    U8                              IncompatibleReason; /* 0x52 */
    U8                              PhysDiskAttributes; /* 0x53 */
    U32                             PhysDiskStatusFlags;/* 0x54 */
    U64                             DeviceMaxLBA;       /* 0x58 */
    U64                             HostMaxLBA;         /* 0x60 */
    U64                             CoercedMaxLBA;      /* 0x68 */
    U16                             BlockSize;          /* 0x70 */
    U16                             Reserved5;          /* 0x72 */
    U32                             Reserved6;          /* 0x74 */
} MPI2_CONFIG_PAGE_RD_PDISK_0,
  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RD_PDISK_0,
  Mpi2RaidPhysDiskPage0_t, MPI2_POINTER pMpi2RaidPhysDiskPage0_t;

#define MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION          (0x05)

/* PhysDiskState defines */
#define MPI2_RAID_PD_STATE_NOT_CONFIGURED           (0x00)
#define MPI2_RAID_PD_STATE_NOT_COMPATIBLE           (0x01)
#define MPI2_RAID_PD_STATE_OFFLINE                  (0x02)
#define MPI2_RAID_PD_STATE_ONLINE                   (0x03)
#define MPI2_RAID_PD_STATE_HOT_SPARE                (0x04)
#define MPI2_RAID_PD_STATE_DEGRADED                 (0x05)
#define MPI2_RAID_PD_STATE_REBUILDING               (0x06)
#define MPI2_RAID_PD_STATE_OPTIMAL                  (0x07)

/* OfflineReason defines */
#define MPI2_PHYSDISK0_ONLINE                       (0x00)
#define MPI2_PHYSDISK0_OFFLINE_MISSING              (0x01)
#define MPI2_PHYSDISK0_OFFLINE_FAILED               (0x03)
#define MPI2_PHYSDISK0_OFFLINE_INITIALIZING         (0x04)
#define MPI2_PHYSDISK0_OFFLINE_REQUESTED            (0x05)
#define MPI2_PHYSDISK0_OFFLINE_FAILED_REQUESTED     (0x06)
#define MPI2_PHYSDISK0_OFFLINE_OTHER                (0xFF)

/* IncompatibleReason defines */
#define MPI2_PHYSDISK0_COMPATIBLE                   (0x00)
#define MPI2_PHYSDISK0_INCOMPATIBLE_PROTOCOL        (0x01)
#define MPI2_PHYSDISK0_INCOMPATIBLE_BLOCKSIZE       (0x02)
#define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA         (0x03)
#define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04)
#define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05)
#define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN         (0xFF)

/* PhysDiskAttributes defines */
#define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE     (0x08)
#define MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE       (0x04)
#define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL          (0x02)
#define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL         (0x01)

/* PhysDiskStatusFlags defines */
#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED        (0x00000040)
#define MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET           (0x00000020)
#define MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED  (0x00000010)
#define MPI2_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS     (0x00000000)
#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x00000008)
#define MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME      (0x00000004)
#define MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED             (0x00000002)
#define MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC          (0x00000001)
1326
1327
/* RAID Physical Disk Page 1: multipath paths to a member disk */

/*
 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
 * one and check Header.PageLength or NumPhysDiskPaths at runtime.
 */
#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX
#define MPI2_RAID_PHYS_DISK1_PATH_MAX   (1)
#endif

/* one entry per path */
typedef struct _MPI2_RAIDPHYSDISK1_PATH
{
    U16     DevHandle;      /* 0x00 */
    U16     Reserved1;      /* 0x02 */
    U64     WWID;           /* 0x04 */
    U64     OwnerWWID;      /* 0x0C */
    U8      OwnerIdentifier;/* 0x14 */
    U8      Reserved2;      /* 0x15 */
    U16     Flags;          /* 0x16 */
} MPI2_RAIDPHYSDISK1_PATH, MPI2_POINTER PTR_MPI2_RAIDPHYSDISK1_PATH,
  Mpi2RaidPhysDisk1Path_t, MPI2_POINTER pMpi2RaidPhysDisk1Path_t;

/* RAID Physical Disk Page 1 Physical Disk Path Flags field defines */
#define MPI2_RAID_PHYSDISK1_FLAG_PRIMARY    (0x0004)
#define MPI2_RAID_PHYSDISK1_FLAG_BROKEN     (0x0002)
#define MPI2_RAID_PHYSDISK1_FLAG_INVALID    (0x0001)

typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1
{
    MPI2_CONFIG_PAGE_HEADER Header;             /* 0x00 */
    U8                      NumPhysDiskPaths;   /* 0x04 */
    U8                      PhysDiskNum;        /* 0x05 */
    U16                     Reserved1;          /* 0x06 */
    U32                     Reserved2;          /* 0x08 */
    MPI2_RAIDPHYSDISK1_PATH PhysicalDiskPath[MPI2_RAID_PHYS_DISK1_PATH_MAX];/* 0x0C */
} MPI2_CONFIG_PAGE_RD_PDISK_1,
  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RD_PDISK_1,
  Mpi2RaidPhysDiskPage1_t, MPI2_POINTER pMpi2RaidPhysDiskPage1_t;

#define MPI2_RAIDPHYSDISKPAGE1_PAGEVERSION  (0x02)
1368
1369
/****************************************************************************
*  values for fields used by several types of SAS Config Pages
****************************************************************************/

/* values for NegotiatedLinkRates fields */
#define MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL             (0xF0)
#define MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL            (4)
#define MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL            (0x0F)
/* link rates used for Negotiated Physical and Logical Link Rate */
#define MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE        (0x00)
#define MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED             (0x01)
#define MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED       (0x02)
#define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE        (0x03)
#define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR            (0x04)
#define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS    (0x05)
#define MPI2_SAS_NEG_LINK_RATE_1_5                      (0x08)
#define MPI2_SAS_NEG_LINK_RATE_3_0                      (0x09)
#define MPI2_SAS_NEG_LINK_RATE_6_0                      (0x0A)
1388
1389
/* values for AttachedPhyInfo fields */
#define MPI2_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT       (0x00000040)
#define MPI2_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS        (0x00000020)
#define MPI2_SAS_APHYINFO_BREAK_REPLY_CAPABLE           (0x00000010)

/* reason code for the most recent link reset of the attached phy */
#define MPI2_SAS_APHYINFO_REASON_MASK                   (0x0000000F)
#define MPI2_SAS_APHYINFO_REASON_UNKNOWN                (0x00000000)
#define MPI2_SAS_APHYINFO_REASON_POWER_ON               (0x00000001)
#define MPI2_SAS_APHYINFO_REASON_HARD_RESET             (0x00000002)
#define MPI2_SAS_APHYINFO_REASON_SMP_PHY_CONTROL        (0x00000003)
#define MPI2_SAS_APHYINFO_REASON_LOSS_OF_SYNC           (0x00000004)
#define MPI2_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ       (0x00000005)
#define MPI2_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER    (0x00000006)
#define MPI2_SAS_APHYINFO_REASON_BREAK_TIMEOUT          (0x00000007)
#define MPI2_SAS_APHYINFO_REASON_PHY_TEST_STOPPED       (0x00000008)
1405
1406
/* values for PhyInfo fields */
#define MPI2_SAS_PHYINFO_PHY_VACANT                     (0x80000000)
#define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS       (0x04000000)
#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT        (0x02000000)
#define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS               (0x01000000)
#define MPI2_SAS_PHYINFO_ZONE_GROUP_PERSISTENT          (0x00400000)
#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS                   (0x00200000)
#define MPI2_SAS_PHYINFO_ZONING_ENABLED                 (0x00100000)

/* reason code for the most recent link reset of this phy */
#define MPI2_SAS_PHYINFO_REASON_MASK                    (0x000F0000)
#define MPI2_SAS_PHYINFO_REASON_UNKNOWN                 (0x00000000)
#define MPI2_SAS_PHYINFO_REASON_POWER_ON                (0x00010000)
#define MPI2_SAS_PHYINFO_REASON_HARD_RESET              (0x00020000)
#define MPI2_SAS_PHYINFO_REASON_SMP_PHY_CONTROL         (0x00030000)
#define MPI2_SAS_PHYINFO_REASON_LOSS_OF_SYNC            (0x00040000)
#define MPI2_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ        (0x00050000)
#define MPI2_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER     (0x00060000)
#define MPI2_SAS_PHYINFO_REASON_BREAK_TIMEOUT           (0x00070000)
#define MPI2_SAS_PHYINFO_REASON_PHY_TEST_STOPPED        (0x00080000)

#define MPI2_SAS_PHYINFO_MULTIPLEXING_SUPPORTED         (0x00008000)
#define MPI2_SAS_PHYINFO_SATA_PORT_ACTIVE               (0x00004000)
#define MPI2_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT     (0x00002000)
#define MPI2_SAS_PHYINFO_VIRTUAL_PHY                    (0x00001000)

#define MPI2_SAS_PHYINFO_MASK_PARTIAL_PATHWAY_TIME      (0x00000F00)
#define MPI2_SAS_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME     (8)

#define MPI2_SAS_PHYINFO_MASK_ROUTING_ATTRIBUTE         (0x000000F0)
#define MPI2_SAS_PHYINFO_DIRECT_ROUTING                 (0x00000000)
#define MPI2_SAS_PHYINFO_SUBTRACTIVE_ROUTING            (0x00000010)
#define MPI2_SAS_PHYINFO_TABLE_ROUTING                  (0x00000020)
1439
1440
/* values for SAS ProgrammedLinkRate fields (max rate in the high nibble,
 * min rate in the low nibble) */
#define MPI2_SAS_PRATE_MAX_RATE_MASK                    (0xF0)
#define MPI2_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE        (0x00)
#define MPI2_SAS_PRATE_MAX_RATE_1_5                     (0x80)
#define MPI2_SAS_PRATE_MAX_RATE_3_0                     (0x90)
#define MPI2_SAS_PRATE_MAX_RATE_6_0                     (0xA0)
#define MPI2_SAS_PRATE_MIN_RATE_MASK                    (0x0F)
#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE        (0x00)
#define MPI2_SAS_PRATE_MIN_RATE_1_5                     (0x08)
#define MPI2_SAS_PRATE_MIN_RATE_3_0                     (0x09)
#define MPI2_SAS_PRATE_MIN_RATE_6_0                     (0x0A)


/* values for SAS HwLinkRate fields (same nibble layout as ProgrammedLinkRate) */
#define MPI2_SAS_HWRATE_MAX_RATE_MASK                   (0xF0)
#define MPI2_SAS_HWRATE_MAX_RATE_1_5                    (0x80)
#define MPI2_SAS_HWRATE_MAX_RATE_3_0                    (0x90)
#define MPI2_SAS_HWRATE_MAX_RATE_6_0                    (0xA0)
#define MPI2_SAS_HWRATE_MIN_RATE_MASK                   (0x0F)
#define MPI2_SAS_HWRATE_MIN_RATE_1_5                    (0x08)
#define MPI2_SAS_HWRATE_MIN_RATE_3_0                    (0x09)
#define MPI2_SAS_HWRATE_MIN_RATE_6_0                    (0x0A)
1463
1464
1465
1466/****************************************************************************
1467* SAS IO Unit Config Pages
1468****************************************************************************/
1469
1470/* SAS IO Unit Page 0 */
1471
1472typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA
1473{
1474 U8 Port; /* 0x00 */
1475 U8 PortFlags; /* 0x01 */
1476 U8 PhyFlags; /* 0x02 */
1477 U8 NegotiatedLinkRate; /* 0x03 */
1478 U32 ControllerPhyDeviceInfo;/* 0x04 */
1479 U16 AttachedDevHandle; /* 0x08 */
1480 U16 ControllerDevHandle; /* 0x0A */
1481 U32 DiscoveryStatus; /* 0x0C */
1482 U32 Reserved; /* 0x10 */
1483} MPI2_SAS_IO_UNIT0_PHY_DATA, MPI2_POINTER PTR_MPI2_SAS_IO_UNIT0_PHY_DATA,
1484 Mpi2SasIOUnit0PhyData_t, MPI2_POINTER pMpi2SasIOUnit0PhyData_t;
1485
1486/*
1487 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1488 * one and check Header.ExtPageLength or NumPhys at runtime.
1489 */
1490#ifndef MPI2_SAS_IOUNIT0_PHY_MAX
1491#define MPI2_SAS_IOUNIT0_PHY_MAX (1)
1492#endif
1493
1494typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0
1495{
1496 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
1497 U32 Reserved1; /* 0x08 */
1498 U8 NumPhys; /* 0x0C */
1499 U8 Reserved2; /* 0x0D */
1500 U16 Reserved3; /* 0x0E */
1501 MPI2_SAS_IO_UNIT0_PHY_DATA PhyData[MPI2_SAS_IOUNIT0_PHY_MAX]; /* 0x10 */
1502} MPI2_CONFIG_PAGE_SASIOUNIT_0,
1503 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_0,
1504 Mpi2SasIOUnitPage0_t, MPI2_POINTER pMpi2SasIOUnitPage0_t;
1505
1506#define MPI2_SASIOUNITPAGE0_PAGEVERSION (0x05)
1507
/* values for SAS IO Unit Page 0 PortFlags */
#define MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08)
#define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG      (0x01)

/* values for SAS IO Unit Page 0 PhyFlags */
#define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED         (0x10)
#define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED           (0x08)

/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */

/* see mpi2_sas.h for values for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */

/* values for SAS IO Unit Page 0 DiscoveryStatus */
#define MPI2_SASIOUNIT0_DS_MAX_ENCLOSURES_EXCEED        (0x80000000)
#define MPI2_SASIOUNIT0_DS_MAX_EXPANDERS_EXCEED         (0x40000000)
#define MPI2_SASIOUNIT0_DS_MAX_DEVICES_EXCEED           (0x20000000)
#define MPI2_SASIOUNIT0_DS_MAX_TOPO_PHYS_EXCEED         (0x10000000)
#define MPI2_SASIOUNIT0_DS_DOWNSTREAM_INITIATOR         (0x08000000)
#define MPI2_SASIOUNIT0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
#define MPI2_SASIOUNIT0_DS_EXP_MULTI_SUBTRACTIVE        (0x00004000)
#define MPI2_SASIOUNIT0_DS_MULTI_PORT_DOMAIN            (0x00002000)
#define MPI2_SASIOUNIT0_DS_TABLE_TO_SUBTRACTIVE_LINK    (0x00001000)
#define MPI2_SASIOUNIT0_DS_UNSUPPORTED_DEVICE           (0x00000800)
#define MPI2_SASIOUNIT0_DS_TABLE_LINK                   (0x00000400)
#define MPI2_SASIOUNIT0_DS_SUBTRACTIVE_LINK             (0x00000200)
#define MPI2_SASIOUNIT0_DS_SMP_CRC_ERROR                (0x00000100)
#define MPI2_SASIOUNIT0_DS_SMP_FUNCTION_FAILED          (0x00000080)
#define MPI2_SASIOUNIT0_DS_INDEX_NOT_EXIST              (0x00000040)
#define MPI2_SASIOUNIT0_DS_OUT_ROUTE_ENTRIES            (0x00000020)
#define MPI2_SASIOUNIT0_DS_SMP_TIMEOUT                  (0x00000010)
#define MPI2_SASIOUNIT0_DS_MULTIPLE_PORTS               (0x00000004)
#define MPI2_SASIOUNIT0_DS_UNADDRESSABLE_DEVICE         (0x00000002)
#define MPI2_SASIOUNIT0_DS_LOOP_DETECTED                (0x00000001)
1541
1542
1543/* SAS IO Unit Page 1 */
1544
1545typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA
1546{
1547 U8 Port; /* 0x00 */
1548 U8 PortFlags; /* 0x01 */
1549 U8 PhyFlags; /* 0x02 */
1550 U8 MaxMinLinkRate; /* 0x03 */
1551 U32 ControllerPhyDeviceInfo; /* 0x04 */
1552 U16 MaxTargetPortConnectTime; /* 0x08 */
1553 U16 Reserved1; /* 0x0A */
1554} MPI2_SAS_IO_UNIT1_PHY_DATA, MPI2_POINTER PTR_MPI2_SAS_IO_UNIT1_PHY_DATA,
1555 Mpi2SasIOUnit1PhyData_t, MPI2_POINTER pMpi2SasIOUnit1PhyData_t;
1556
1557/*
1558 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1559 * one and check Header.ExtPageLength or NumPhys at runtime.
1560 */
1561#ifndef MPI2_SAS_IOUNIT1_PHY_MAX
1562#define MPI2_SAS_IOUNIT1_PHY_MAX (1)
1563#endif
1564
1565typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1
1566{
1567 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
1568 U16 ControlFlags; /* 0x08 */
1569 U16 SASNarrowMaxQueueDepth; /* 0x0A */
1570 U16 AdditionalControlFlags; /* 0x0C */
1571 U16 SASWideMaxQueueDepth; /* 0x0E */
1572 U8 NumPhys; /* 0x10 */
1573 U8 SATAMaxQDepth; /* 0x11 */
1574 U8 ReportDeviceMissingDelay; /* 0x12 */
1575 U8 IODeviceMissingDelay; /* 0x13 */
1576 MPI2_SAS_IO_UNIT1_PHY_DATA PhyData[MPI2_SAS_IOUNIT1_PHY_MAX]; /* 0x14 */
1577} MPI2_CONFIG_PAGE_SASIOUNIT_1,
1578 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_1,
1579 Mpi2SasIOUnitPage1_t, MPI2_POINTER pMpi2SasIOUnitPage1_t;
1580
1581#define MPI2_SASIOUNITPAGE1_PAGEVERSION (0x09)
1582
/* values for SAS IO Unit Page 1 ControlFlags */
#define MPI2_SASIOUNIT1_CONTROL_DEVICE_SELF_TEST                (0x8000)
#define MPI2_SASIOUNIT1_CONTROL_SATA_3_0_MAX                    (0x4000)
#define MPI2_SASIOUNIT1_CONTROL_SATA_1_5_MAX                    (0x2000)
#define MPI2_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE                (0x1000)

#define MPI2_SASIOUNIT1_CONTROL_MASK_DEV_SUPPORT                (0x0600)
#define MPI2_SASIOUNIT1_CONTROL_SHIFT_DEV_SUPPORT               (9)
#define MPI2_SASIOUNIT1_CONTROL_DEV_SUPPORT_BOTH                (0x0)
#define MPI2_SASIOUNIT1_CONTROL_DEV_SAS_SUPPORT                 (0x1)
#define MPI2_SASIOUNIT1_CONTROL_DEV_SATA_SUPPORT                (0x2)

#define MPI2_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED         (0x0080)
#define MPI2_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED             (0x0040)
#define MPI2_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED               (0x0020)
#define MPI2_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED               (0x0010)
#define MPI2_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL       (0x0008)
#define MPI2_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL             (0x0004)
#define MPI2_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY             (0x0002)
#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION               (0x0001)

/* values for SAS IO Unit Page 1 AdditionalControlFlags */
/* NOTE: ASYNCHROUNOUS misspelling is part of the published API name;
 * do not correct it. */
#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL          (0x0080)
#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION    (0x0040)
#define MPI2_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION        (0x0020)
#define MPI2_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET   (0x0010)
#define MPI2_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET  (0x0008)
#define MPI2_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET   (0x0004)
#define MPI2_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET     (0x0002)
#define MPI2_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE               (0x0001)

/* defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */
#define MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK     (0x7F)
#define MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16          (0x80)

/* values for SAS IO Unit Page 1 PortFlags */
#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG     (0x01)

/* values for SAS IO Unit Page 1 PhyFlags */
#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE          (0x10)
#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE            (0x08)

/* values for SAS IO Unit Page 1 MaxMinLinkRate */
#define MPI2_SASIOUNIT1_MAX_RATE_MASK                   (0xF0)
#define MPI2_SASIOUNIT1_MAX_RATE_1_5                    (0x80)
#define MPI2_SASIOUNIT1_MAX_RATE_3_0                    (0x90)
#define MPI2_SASIOUNIT1_MAX_RATE_6_0                    (0xA0)
#define MPI2_SASIOUNIT1_MIN_RATE_MASK                   (0x0F)
#define MPI2_SASIOUNIT1_MIN_RATE_1_5                    (0x08)
#define MPI2_SASIOUNIT1_MIN_RATE_3_0                    (0x09)
#define MPI2_SASIOUNIT1_MIN_RATE_6_0                    (0x0A)

/* see mpi2_sas.h for values for SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
1636
1637
1638/* SAS IO Unit Page 4 */
1639
1640typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP
1641{
1642 U8 MaxTargetSpinup; /* 0x00 */
1643 U8 SpinupDelay; /* 0x01 */
1644 U16 Reserved1; /* 0x02 */
1645} MPI2_SAS_IOUNIT4_SPINUP_GROUP, MPI2_POINTER PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP,
1646 Mpi2SasIOUnit4SpinupGroup_t, MPI2_POINTER pMpi2SasIOUnit4SpinupGroup_t;
1647
1648/*
1649 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1650 * four and check Header.ExtPageLength or NumPhys at runtime.
1651 */
1652#ifndef MPI2_SAS_IOUNIT4_PHY_MAX
1653#define MPI2_SAS_IOUNIT4_PHY_MAX (4)
1654#endif
1655
1656typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4
1657{
1658 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
1659 MPI2_SAS_IOUNIT4_SPINUP_GROUP SpinupGroupParameters[4]; /* 0x08 */
1660 U32 Reserved1; /* 0x18 */
1661 U32 Reserved2; /* 0x1C */
1662 U32 Reserved3; /* 0x20 */
1663 U8 BootDeviceWaitTime; /* 0x24 */
1664 U8 Reserved4; /* 0x25 */
1665 U16 Reserved5; /* 0x26 */
1666 U8 NumPhys; /* 0x28 */
1667 U8 PEInitialSpinupDelay; /* 0x29 */
1668 U8 PEReplyDelay; /* 0x2A */
1669 U8 Flags; /* 0x2B */
1670 U8 PHY[MPI2_SAS_IOUNIT4_PHY_MAX]; /* 0x2C */
1671} MPI2_CONFIG_PAGE_SASIOUNIT_4,
1672 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_4,
1673 Mpi2SasIOUnitPage4_t, MPI2_POINTER pMpi2SasIOUnitPage4_t;
1674
1675#define MPI2_SASIOUNITPAGE4_PAGEVERSION (0x02)
1676
1677/* defines for Flags field */
1678#define MPI2_SASIOUNIT4_FLAGS_AUTO_PORTENABLE (0x01)
1679
1680/* defines for PHY field */
1681#define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03)
1682
1683
1684/****************************************************************************
1685* SAS Expander Config Pages
1686****************************************************************************/
1687
1688/* SAS Expander Page 0 */
1689
1690typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0
1691{
1692 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
1693 U8 PhysicalPort; /* 0x08 */
1694 U8 ReportGenLength; /* 0x09 */
1695 U16 EnclosureHandle; /* 0x0A */
1696 U64 SASAddress; /* 0x0C */
1697 U32 DiscoveryStatus; /* 0x14 */
1698 U16 DevHandle; /* 0x18 */
1699 U16 ParentDevHandle; /* 0x1A */
1700 U16 ExpanderChangeCount; /* 0x1C */
1701 U16 ExpanderRouteIndexes; /* 0x1E */
1702 U8 NumPhys; /* 0x20 */
1703 U8 SASLevel; /* 0x21 */
1704 U16 Flags; /* 0x22 */
1705 U16 STPBusInactivityTimeLimit; /* 0x24 */
1706 U16 STPMaxConnectTimeLimit; /* 0x26 */
1707 U16 STP_SMP_NexusLossTime; /* 0x28 */
1708 U16 MaxNumRoutedSasAddresses; /* 0x2A */
1709 U64 ActiveZoneManagerSASAddress;/* 0x2C */
1710 U16 ZoneLockInactivityLimit; /* 0x34 */
1711 U16 Reserved1; /* 0x36 */
1712} MPI2_CONFIG_PAGE_EXPANDER_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXPANDER_0,
1713 Mpi2ExpanderPage0_t, MPI2_POINTER pMpi2ExpanderPage0_t;
1714
1715#define MPI2_SASEXPANDER0_PAGEVERSION (0x05)
1716
/* values for SAS Expander Page 0 DiscoveryStatus field */
#define MPI2_SAS_EXPANDER0_DS_MAX_ENCLOSURES_EXCEED         (0x80000000)
#define MPI2_SAS_EXPANDER0_DS_MAX_EXPANDERS_EXCEED          (0x40000000)
#define MPI2_SAS_EXPANDER0_DS_MAX_DEVICES_EXCEED            (0x20000000)
#define MPI2_SAS_EXPANDER0_DS_MAX_TOPO_PHYS_EXCEED          (0x10000000)
#define MPI2_SAS_EXPANDER0_DS_DOWNSTREAM_INITIATOR          (0x08000000)
#define MPI2_SAS_EXPANDER0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
#define MPI2_SAS_EXPANDER0_DS_EXP_MULTI_SUBTRACTIVE         (0x00004000)
#define MPI2_SAS_EXPANDER0_DS_MULTI_PORT_DOMAIN             (0x00002000)
#define MPI2_SAS_EXPANDER0_DS_TABLE_TO_SUBTRACTIVE_LINK     (0x00001000)
#define MPI2_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE            (0x00000800)
#define MPI2_SAS_EXPANDER0_DS_TABLE_LINK                    (0x00000400)
#define MPI2_SAS_EXPANDER0_DS_SUBTRACTIVE_LINK              (0x00000200)
#define MPI2_SAS_EXPANDER0_DS_SMP_CRC_ERROR                 (0x00000100)
#define MPI2_SAS_EXPANDER0_DS_SMP_FUNCTION_FAILED           (0x00000080)
#define MPI2_SAS_EXPANDER0_DS_INDEX_NOT_EXIST               (0x00000040)
#define MPI2_SAS_EXPANDER0_DS_OUT_ROUTE_ENTRIES             (0x00000020)
#define MPI2_SAS_EXPANDER0_DS_SMP_TIMEOUT                   (0x00000010)
#define MPI2_SAS_EXPANDER0_DS_MULTIPLE_PORTS                (0x00000004)
#define MPI2_SAS_EXPANDER0_DS_UNADDRESSABLE_DEVICE          (0x00000002)
#define MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED                 (0x00000001)

/* values for SAS Expander Page 0 Flags field */
#define MPI2_SAS_EXPANDER0_FLAGS_ZONE_LOCKED                (0x1000)
#define MPI2_SAS_EXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES    (0x0800)
#define MPI2_SAS_EXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES     (0x0400)
#define MPI2_SAS_EXPANDER0_FLAGS_ZONING_SUPPORT             (0x0200)
#define MPI2_SAS_EXPANDER0_FLAGS_ENABLED_ZONING             (0x0100)
#define MPI2_SAS_EXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT     (0x0080)
#define MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE       (0x0010)
#define MPI2_SAS_EXPANDER0_FLAGS_OTHERS_CONFIG              (0x0004)
#define MPI2_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS         (0x0002)
#define MPI2_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG         (0x0001)
1750
1751
1752/* SAS Expander Page 1 */
1753
1754typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1
1755{
1756 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
1757 U8 PhysicalPort; /* 0x08 */
1758 U8 Reserved1; /* 0x09 */
1759 U16 Reserved2; /* 0x0A */
1760 U8 NumPhys; /* 0x0C */
1761 U8 Phy; /* 0x0D */
1762 U16 NumTableEntriesProgrammed; /* 0x0E */
1763 U8 ProgrammedLinkRate; /* 0x10 */
1764 U8 HwLinkRate; /* 0x11 */
1765 U16 AttachedDevHandle; /* 0x12 */
1766 U32 PhyInfo; /* 0x14 */
1767 U32 AttachedDeviceInfo; /* 0x18 */
1768 U16 ExpanderDevHandle; /* 0x1C */
1769 U8 ChangeCount; /* 0x1E */
1770 U8 NegotiatedLinkRate; /* 0x1F */
1771 U8 PhyIdentifier; /* 0x20 */
1772 U8 AttachedPhyIdentifier; /* 0x21 */
1773 U8 Reserved3; /* 0x22 */
1774 U8 DiscoveryInfo; /* 0x23 */
1775 U32 AttachedPhyInfo; /* 0x24 */
1776 U8 ZoneGroup; /* 0x28 */
1777 U8 SelfConfigStatus; /* 0x29 */
1778 U16 Reserved4; /* 0x2A */
1779} MPI2_CONFIG_PAGE_EXPANDER_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXPANDER_1,
1780 Mpi2ExpanderPage1_t, MPI2_POINTER pMpi2ExpanderPage1_t;
1781
1782#define MPI2_SASEXPANDER1_PAGEVERSION (0x02)
1783
/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */

/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */

/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */

/* see mpi2_sas.h for the MPI2_SAS_DEVICE_INFO_ defines used for the AttachedDeviceInfo field */

/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */

/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */

/* values for SAS Expander Page 1 DiscoveryInfo field */
#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED    (0x04)
#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE  (0x02)
#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES  (0x01)
1800
1801
1802/****************************************************************************
1803* SAS Device Config Pages
1804****************************************************************************/
1805
1806/* SAS Device Page 0 */
1807
1808typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0
1809{
1810 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
1811 U16 Slot; /* 0x08 */
1812 U16 EnclosureHandle; /* 0x0A */
1813 U64 SASAddress; /* 0x0C */
1814 U16 ParentDevHandle; /* 0x14 */
1815 U8 PhyNum; /* 0x16 */
1816 U8 AccessStatus; /* 0x17 */
1817 U16 DevHandle; /* 0x18 */
1818 U8 AttachedPhyIdentifier; /* 0x1A */
1819 U8 ZoneGroup; /* 0x1B */
1820 U32 DeviceInfo; /* 0x1C */
1821 U16 Flags; /* 0x20 */
1822 U8 PhysicalPort; /* 0x22 */
1823 U8 MaxPortConnections; /* 0x23 */
1824 U64 DeviceName; /* 0x24 */
1825 U8 PortGroups; /* 0x2C */
1826 U8 DmaGroup; /* 0x2D */
1827 U8 ControlGroup; /* 0x2E */
1828 U8 Reserved1; /* 0x2F */
1829 U32 Reserved2; /* 0x30 */
1830 U32 Reserved3; /* 0x34 */
1831} MPI2_CONFIG_PAGE_SAS_DEV_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_0,
1832 Mpi2SasDevicePage0_t, MPI2_POINTER pMpi2SasDevicePage0_t;
1833
1834#define MPI2_SASDEVICE0_PAGEVERSION (0x08)
1835
/* values for SAS Device Page 0 AccessStatus field */
#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS                  (0x00)
#define MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED           (0x01)
#define MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED     (0x02)
#define MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT  (0x03)
#define MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION  (0x04)
#define MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE      (0x05)
#define MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE  (0x06)
#define MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED             (0x07)
/* specific values for SATA Init failures */
#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN                (0x10)
#define MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT   (0x11)
#define MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG                   (0x12)
#define MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION         (0x13)
#define MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER            (0x14)
#define MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN                 (0x15)
#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN                (0x16)
#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN                (0x17)
#define MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION       (0x18)
#define MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE        (0x19)
#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX                    (0x1F)

/* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */

/* values for SAS Device Page 0 Flags field */
#define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY     (0x0400)
#define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE             (0x0200)
#define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE           (0x0100)
#define MPI2_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED     (0x0080)
#define MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED         (0x0040)
#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED           (0x0020)
#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED           (0x0010)
#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH         (0x0008)
#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT               (0x0001)
1870
1871
1872/* SAS Device Page 1 */
1873
1874typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_1
1875{
1876 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
1877 U32 Reserved1; /* 0x08 */
1878 U64 SASAddress; /* 0x0C */
1879 U32 Reserved2; /* 0x14 */
1880 U16 DevHandle; /* 0x18 */
1881 U16 Reserved3; /* 0x1A */
1882 U8 InitialRegDeviceFIS[20];/* 0x1C */
1883} MPI2_CONFIG_PAGE_SAS_DEV_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_1,
1884 Mpi2SasDevicePage1_t, MPI2_POINTER pMpi2SasDevicePage1_t;
1885
1886#define MPI2_SASDEVICE1_PAGEVERSION (0x01)
1887
1888
1889/****************************************************************************
1890* SAS PHY Config Pages
1891****************************************************************************/
1892
1893/* SAS PHY Page 0 */
1894
1895typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0
1896{
1897 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
1898 U16 OwnerDevHandle; /* 0x08 */
1899 U16 Reserved1; /* 0x0A */
1900 U16 AttachedDevHandle; /* 0x0C */
1901 U8 AttachedPhyIdentifier; /* 0x0E */
1902 U8 Reserved2; /* 0x0F */
1903 U32 AttachedPhyInfo; /* 0x10 */
1904 U8 ProgrammedLinkRate; /* 0x14 */
1905 U8 HwLinkRate; /* 0x15 */
1906 U8 ChangeCount; /* 0x16 */
1907 U8 Flags; /* 0x17 */
1908 U32 PhyInfo; /* 0x18 */
1909 U8 NegotiatedLinkRate; /* 0x1C */
1910 U8 Reserved3; /* 0x1D */
1911 U16 Reserved4; /* 0x1E */
1912} MPI2_CONFIG_PAGE_SAS_PHY_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_0,
1913 Mpi2SasPhyPage0_t, MPI2_POINTER pMpi2SasPhyPage0_t;
1914
1915#define MPI2_SASPHY0_PAGEVERSION (0x03)
1916
1917/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
1918
1919/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
1920
1921/* values for SAS PHY Page 0 Flags field */
1922#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
1923
1924/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
1925
1926/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
1927
1928/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
1929
1930
1931/* SAS PHY Page 1 */
1932
1933typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_1
1934{
1935 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
1936 U32 Reserved1; /* 0x08 */
1937 U32 InvalidDwordCount; /* 0x0C */
1938 U32 RunningDisparityErrorCount; /* 0x10 */
1939 U32 LossDwordSynchCount; /* 0x14 */
1940 U32 PhyResetProblemCount; /* 0x18 */
1941} MPI2_CONFIG_PAGE_SAS_PHY_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_1,
1942 Mpi2SasPhyPage1_t, MPI2_POINTER pMpi2SasPhyPage1_t;
1943
1944#define MPI2_SASPHY1_PAGEVERSION (0x01)
1945
1946
1947/****************************************************************************
1948* SAS Port Config Pages
1949****************************************************************************/
1950
1951/* SAS Port Page 0 */
1952
1953typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0
1954{
1955 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
1956 U8 PortNumber; /* 0x08 */
1957 U8 PhysicalPort; /* 0x09 */
1958 U8 PortWidth; /* 0x0A */
1959 U8 PhysicalPortWidth; /* 0x0B */
1960 U8 ZoneGroup; /* 0x0C */
1961 U8 Reserved1; /* 0x0D */
1962 U16 Reserved2; /* 0x0E */
1963 U64 SASAddress; /* 0x10 */
1964 U32 DeviceInfo; /* 0x18 */
1965 U32 Reserved3; /* 0x1C */
1966 U32 Reserved4; /* 0x20 */
1967} MPI2_CONFIG_PAGE_SAS_PORT_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PORT_0,
1968 Mpi2SasPortPage0_t, MPI2_POINTER pMpi2SasPortPage0_t;
1969
1970#define MPI2_SASPORT0_PAGEVERSION (0x00)
1971
1972/* see mpi2_sas.h for values for SAS Port Page 0 DeviceInfo values */
1973
1974
1975/****************************************************************************
1976* SAS Enclosure Config Pages
1977****************************************************************************/
1978
1979/* SAS Enclosure Page 0 */
1980
1981typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0
1982{
1983 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
1984 U32 Reserved1; /* 0x08 */
1985 U64 EnclosureLogicalID; /* 0x0C */
1986 U16 Flags; /* 0x14 */
1987 U16 EnclosureHandle; /* 0x16 */
1988 U16 NumSlots; /* 0x18 */
1989 U16 StartSlot; /* 0x1A */
1990 U16 Reserved2; /* 0x1C */
1991 U16 SEPDevHandle; /* 0x1E */
1992 U32 Reserved3; /* 0x20 */
1993 U32 Reserved4; /* 0x24 */
1994} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
1995 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
1996 Mpi2SasEnclosurePage0_t, MPI2_POINTER pMpi2SasEnclosurePage0_t;
1997
1998#define MPI2_SASENCLOSURE0_PAGEVERSION (0x03)
1999
/* values for SAS Enclosure Page 0 Flags field */
#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK              (0x000F)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN           (0x0000)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES           (0x0001)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO         (0x0002)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO         (0x0003)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE     (0x0004)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO          (0x0005)
2008
2009
2010/****************************************************************************
2011* Log Config Page
2012****************************************************************************/
2013
2014/* Log Page 0 */
2015
2016/*
2017 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
2018 * one and check Header.ExtPageLength or NumPhys at runtime.
2019 */
2020#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES
2021#define MPI2_LOG_0_NUM_LOG_ENTRIES (1)
2022#endif
2023
2024#define MPI2_LOG_0_LOG_DATA_LENGTH (0x1C)
2025
2026typedef struct _MPI2_LOG_0_ENTRY
2027{
2028 U64 TimeStamp; /* 0x00 */
2029 U32 Reserved1; /* 0x08 */
2030 U16 LogSequence; /* 0x0C */
2031 U16 LogEntryQualifier; /* 0x0E */
2032 U8 VP_ID; /* 0x10 */
2033 U8 VF_ID; /* 0x11 */
2034 U16 Reserved2; /* 0x12 */
2035 U8 LogData[MPI2_LOG_0_LOG_DATA_LENGTH];/* 0x14 */
2036} MPI2_LOG_0_ENTRY, MPI2_POINTER PTR_MPI2_LOG_0_ENTRY,
2037 Mpi2Log0Entry_t, MPI2_POINTER pMpi2Log0Entry_t;
2038
/* values for Log Page 0 LogEntry LogEntryQualifier field */
#define MPI2_LOG_0_ENTRY_QUAL_ENTRY_UNUSED          (0x0000)
#define MPI2_LOG_0_ENTRY_QUAL_POWER_ON_RESET        (0x0001)
#define MPI2_LOG_0_ENTRY_QUAL_TIMESTAMP_UPDATE      (0x0002)
/* 0x8000-0xFFFF are reserved for implementation-specific qualifiers */
#define MPI2_LOG_0_ENTRY_QUAL_MIN_IMPLEMENT_SPEC    (0x8000)
#define MPI2_LOG_0_ENTRY_QUAL_MAX_IMPLEMENT_SPEC    (0xFFFF)
2045
2046typedef struct _MPI2_CONFIG_PAGE_LOG_0
2047{
2048 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
2049 U32 Reserved1; /* 0x08 */
2050 U32 Reserved2; /* 0x0C */
2051 U16 NumLogEntries; /* 0x10 */
2052 U16 Reserved3; /* 0x12 */
2053 MPI2_LOG_0_ENTRY LogEntry[MPI2_LOG_0_NUM_LOG_ENTRIES]; /* 0x14 */
2054} MPI2_CONFIG_PAGE_LOG_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_LOG_0,
2055 Mpi2LogPage0_t, MPI2_POINTER pMpi2LogPage0_t;
2056
2057#define MPI2_LOG_0_PAGEVERSION (0x02)
2058
2059
2060/****************************************************************************
2061* RAID Config Page
2062****************************************************************************/
2063
2064/* RAID Page 0 */
2065
2066/*
2067 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
2068 * one and check Header.ExtPageLength or NumPhys at runtime.
2069 */
2070#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS
2071#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1)
2072#endif
2073
2074typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT
2075{
2076 U16 ElementFlags; /* 0x00 */
2077 U16 VolDevHandle; /* 0x02 */
2078 U8 HotSparePool; /* 0x04 */
2079 U8 PhysDiskNum; /* 0x05 */
2080 U16 PhysDiskDevHandle; /* 0x06 */
2081} MPI2_RAIDCONFIG0_CONFIG_ELEMENT,
2082 MPI2_POINTER PTR_MPI2_RAIDCONFIG0_CONFIG_ELEMENT,
2083 Mpi2RaidConfig0ConfigElement_t, MPI2_POINTER pMpi2RaidConfig0ConfigElement_t;
2084
/* values for the ElementFlags field */
#define MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE       (0x000F)
#define MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT          (0x0000)
#define MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT   (0x0001)
#define MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT       (0x0002)
#define MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT             (0x0003)
2091
2092
2093typedef struct _MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0
2094{
2095 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
2096 U8 NumHotSpares; /* 0x08 */
2097 U8 NumPhysDisks; /* 0x09 */
2098 U8 NumVolumes; /* 0x0A */
2099 U8 ConfigNum; /* 0x0B */
2100 U32 Flags; /* 0x0C */
2101 U8 ConfigGUID[24]; /* 0x10 */
2102 U32 Reserved1; /* 0x28 */
2103 U8 NumElements; /* 0x2C */
2104 U8 Reserved2; /* 0x2D */
2105 U16 Reserved3; /* 0x2E */
2106 MPI2_RAIDCONFIG0_CONFIG_ELEMENT ConfigElement[MPI2_RAIDCONFIG0_MAX_ELEMENTS]; /* 0x30 */
2107} MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
2108 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
2109 Mpi2RaidConfigurationPage0_t, MPI2_POINTER pMpi2RaidConfigurationPage0_t;
2110
2111#define MPI2_RAIDCONFIG0_PAGEVERSION (0x00)
2112
2113/* values for RAID Configuration Page 0 Flags field */
2114#define MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG (0x00000001)
2115
2116
2117/****************************************************************************
2118* Driver Persistent Mapping Config Pages
2119****************************************************************************/
2120
2121/* Driver Persistent Mapping Page 0 */
2122
2123typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY
2124{
2125 U64 PhysicalIdentifier; /* 0x00 */
2126 U16 MappingInformation; /* 0x08 */
2127 U16 DeviceIndex; /* 0x0A */
2128 U32 PhysicalBitsMapping; /* 0x0C */
2129 U32 Reserved1; /* 0x10 */
2130} MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY,
2131 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY,
2132 Mpi2DriverMap0Entry_t, MPI2_POINTER pMpi2DriverMap0Entry_t;
2133
2134typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0
2135{
2136 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
2137 MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY Entry; /* 0x08 */
2138} MPI2_CONFIG_PAGE_DRIVER_MAPPING_0,
2139 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_DRIVER_MAPPING_0,
2140 Mpi2DriverMappingPage0_t, MPI2_POINTER pMpi2DriverMappingPage0_t;
2141
2142#define MPI2_DRIVERMAPPING0_PAGEVERSION (0x00)
2143
/* values for Driver Persistent Mapping Page 0 MappingInformation field */
#define MPI2_DRVMAP0_MAPINFO_SLOT_MASK      (0x07F0)
#define MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT     (4)
#define MPI2_DRVMAP0_MAPINFO_MISSING_MASK   (0x000F)
2148
2149
2150#endif
2151
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
new file mode 100644
index 000000000000..f1115f0f0eb2
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -0,0 +1,420 @@
1/*
2 * Copyright (c) 2000-2008 LSI Corporation.
3 *
4 *
5 * Name: mpi2_init.h
6 * Title: MPI SCSI initiator mode messages and structures
7 * Creation Date: June 23, 2006
8 *
9 * mpi2_init.h Version: 02.00.06
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
17 * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
18 * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
19 * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
20 * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
21 * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
22 * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
23 * Control field Task Attribute flags.
 24 * Moved LUN field defines to mpi2.h because they are
25 * common to many structures.
26 * --------------------------------------------------------------------------
27 */
28
29#ifndef MPI2_INIT_H
30#define MPI2_INIT_H
31
32/*****************************************************************************
33*
34* SCSI Initiator Messages
35*
36*****************************************************************************/
37
38/****************************************************************************
39* SCSI IO messages and associated structures
40****************************************************************************/
41
42typedef struct
43{
44 U8 CDB[20]; /* 0x00 */
45 U32 PrimaryReferenceTag; /* 0x14 */
46 U16 PrimaryApplicationTag; /* 0x18 */
47 U16 PrimaryApplicationTagMask; /* 0x1A */
48 U32 TransferLength; /* 0x1C */
49} MPI2_SCSI_IO_CDB_EEDP32, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_EEDP32,
50 Mpi2ScsiIoCdbEedp32_t, MPI2_POINTER pMpi2ScsiIoCdbEedp32_t;
51
52/* TBD: I don't think this is needed for MPI2/Gen2 */
53#if 0
54typedef struct
55{
56 U8 CDB[16]; /* 0x00 */
57 U32 DataLength; /* 0x10 */
58 U32 PrimaryReferenceTag; /* 0x14 */
59 U16 PrimaryApplicationTag; /* 0x18 */
60 U16 PrimaryApplicationTagMask; /* 0x1A */
61 U32 TransferLength; /* 0x1C */
62} MPI2_SCSI_IO32_CDB_EEDP16, MPI2_POINTER PTR_MPI2_SCSI_IO32_CDB_EEDP16,
63 Mpi2ScsiIo32CdbEedp16_t, MPI2_POINTER pMpi2ScsiIo32CdbEedp16_t;
64#endif
65
66typedef union
67{
68 U8 CDB32[32];
69 MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
70 MPI2_SGE_SIMPLE_UNION SGE;
71} MPI2_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_UNION,
72 Mpi2ScsiIoCdb_t, MPI2_POINTER pMpi2ScsiIoCdb_t;
73
74/* SCSI IO Request Message */
75typedef struct _MPI2_SCSI_IO_REQUEST
76{
77 U16 DevHandle; /* 0x00 */
78 U8 ChainOffset; /* 0x02 */
79 U8 Function; /* 0x03 */
80 U16 Reserved1; /* 0x04 */
81 U8 Reserved2; /* 0x06 */
82 U8 MsgFlags; /* 0x07 */
83 U8 VP_ID; /* 0x08 */
84 U8 VF_ID; /* 0x09 */
85 U16 Reserved3; /* 0x0A */
86 U32 SenseBufferLowAddress; /* 0x0C */
87 U16 SGLFlags; /* 0x10 */
88 U8 SenseBufferLength; /* 0x12 */
89 U8 Reserved4; /* 0x13 */
90 U8 SGLOffset0; /* 0x14 */
91 U8 SGLOffset1; /* 0x15 */
92 U8 SGLOffset2; /* 0x16 */
93 U8 SGLOffset3; /* 0x17 */
94 U32 SkipCount; /* 0x18 */
95 U32 DataLength; /* 0x1C */
96 U32 BidirectionalDataLength; /* 0x20 */
97 U16 IoFlags; /* 0x24 */
98 U16 EEDPFlags; /* 0x26 */
99 U32 EEDPBlockSize; /* 0x28 */
100 U32 SecondaryReferenceTag; /* 0x2C */
101 U16 SecondaryApplicationTag; /* 0x30 */
102 U16 ApplicationTagTranslationMask; /* 0x32 */
103 U8 LUN[8]; /* 0x34 */
104 U32 Control; /* 0x3C */
105 MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
106 MPI2_SGE_IO_UNION SGL; /* 0x60 */
107} MPI2_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST,
108 Mpi2SCSIIORequest_t, MPI2_POINTER pMpi2SCSIIORequest_t;
109
110/* SCSI IO MsgFlags bits */
111
112/* MsgFlags for SenseBufferAddressSpace */
113#define MPI2_SCSIIO_MSGFLAGS_MASK_SENSE_ADDR (0x0C)
114#define MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00)
115#define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04)
116#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08)
117#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C)
118
119/* SCSI IO SGLFlags bits */
120
121/* base values for Data Location Address Space */
122#define MPI2_SCSIIO_SGLFLAGS_ADDR_MASK (0x0C)
123#define MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR (0x00)
124#define MPI2_SCSIIO_SGLFLAGS_IOCDDR_ADDR (0x04)
125#define MPI2_SCSIIO_SGLFLAGS_IOCPLB_ADDR (0x08)
126#define MPI2_SCSIIO_SGLFLAGS_IOCPLBNTA_ADDR (0x0C)
127
128/* base values for Type */
129#define MPI2_SCSIIO_SGLFLAGS_TYPE_MASK (0x03)
130#define MPI2_SCSIIO_SGLFLAGS_TYPE_MPI (0x00)
131#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE32 (0x01)
132#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE64 (0x02)
133
134/* shift values for each sub-field */
135#define MPI2_SCSIIO_SGLFLAGS_SGL3_SHIFT (12)
136#define MPI2_SCSIIO_SGLFLAGS_SGL2_SHIFT (8)
137#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4)
138#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0)
139
140/* SCSI IO IoFlags bits */
141
142/* Large CDB Address Space */
143#define MPI2_SCSIIO_CDB_ADDR_MASK (0x6000)
144#define MPI2_SCSIIO_CDB_ADDR_SYSTEM (0x0000)
145#define MPI2_SCSIIO_CDB_ADDR_IOCDDR (0x2000)
146#define MPI2_SCSIIO_CDB_ADDR_IOCPLB (0x4000)
147#define MPI2_SCSIIO_CDB_ADDR_IOCPLBNTA (0x6000)
148
149#define MPI2_SCSIIO_IOFLAGS_LARGE_CDB (0x1000)
150#define MPI2_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800)
151#define MPI2_SCSIIO_IOFLAGS_MULTICAST (0x0400)
152#define MPI2_SCSIIO_IOFLAGS_CMD_DETERMINES_DATA_DIR (0x0200)
153#define MPI2_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF)
154
155/* SCSI IO EEDPFlags bits */
156
157#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
158#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_REFTAG (0x4000)
159#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG (0x2000)
160#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_APPTAG (0x1000)
161
162#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
163#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
164#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
165
166#define MPI2_SCSIIO_EEDPFLAGS_PASSTHRU_REFTAG (0x0008)
167
168#define MPI2_SCSIIO_EEDPFLAGS_MASK_OP (0x0007)
169#define MPI2_SCSIIO_EEDPFLAGS_NOOP_OP (0x0000)
170#define MPI2_SCSIIO_EEDPFLAGS_CHECK_OP (0x0001)
171#define MPI2_SCSIIO_EEDPFLAGS_STRIP_OP (0x0002)
172#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
173#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
174#define MPI2_SCSIIO_EEDPFLAGS_REPLACE_OP (0x0006)
175#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REGEN_OP (0x0007)
176
177/* SCSI IO LUN fields: use MPI2_LUN_ from mpi2.h */
178
179/* SCSI IO Control bits */
180#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_MASK (0xFC000000)
181#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26)
182
183#define MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000)
184#define MPI2_SCSIIO_CONTROL_NODATATRANSFER (0x00000000)
185#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
186#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
187#define MPI2_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000)
188
189#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800)
190#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11)
191
192#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
193#define MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000)
194#define MPI2_SCSIIO_CONTROL_HEADOFQ (0x00000100)
195#define MPI2_SCSIIO_CONTROL_ORDEREDQ (0x00000200)
196#define MPI2_SCSIIO_CONTROL_ACAQ (0x00000400)
197
198#define MPI2_SCSIIO_CONTROL_TLR_MASK (0x000000C0)
199#define MPI2_SCSIIO_CONTROL_NO_TLR (0x00000000)
200#define MPI2_SCSIIO_CONTROL_TLR_ON (0x00000040)
201#define MPI2_SCSIIO_CONTROL_TLR_OFF (0x00000080)
202
203
204/* SCSI IO Error Reply Message */
205typedef struct _MPI2_SCSI_IO_REPLY
206{
207 U16 DevHandle; /* 0x00 */
208 U8 MsgLength; /* 0x02 */
209 U8 Function; /* 0x03 */
210 U16 Reserved1; /* 0x04 */
211 U8 Reserved2; /* 0x06 */
212 U8 MsgFlags; /* 0x07 */
213 U8 VP_ID; /* 0x08 */
214 U8 VF_ID; /* 0x09 */
215 U16 Reserved3; /* 0x0A */
216 U8 SCSIStatus; /* 0x0C */
217 U8 SCSIState; /* 0x0D */
218 U16 IOCStatus; /* 0x0E */
219 U32 IOCLogInfo; /* 0x10 */
220 U32 TransferCount; /* 0x14 */
221 U32 SenseCount; /* 0x18 */
222 U32 ResponseInfo; /* 0x1C */
223 U16 TaskTag; /* 0x20 */
224 U16 Reserved4; /* 0x22 */
225 U32 BidirectionalTransferCount; /* 0x24 */
226 U32 Reserved5; /* 0x28 */
227 U32 Reserved6; /* 0x2C */
228} MPI2_SCSI_IO_REPLY, MPI2_POINTER PTR_MPI2_SCSI_IO_REPLY,
229 Mpi2SCSIIOReply_t, MPI2_POINTER pMpi2SCSIIOReply_t;
230
231/* SCSI IO Reply SCSIStatus values (SAM-4 status codes) */
232
233#define MPI2_SCSI_STATUS_GOOD (0x00)
234#define MPI2_SCSI_STATUS_CHECK_CONDITION (0x02)
235#define MPI2_SCSI_STATUS_CONDITION_MET (0x04)
236#define MPI2_SCSI_STATUS_BUSY (0x08)
237#define MPI2_SCSI_STATUS_INTERMEDIATE (0x10)
238#define MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14)
239#define MPI2_SCSI_STATUS_RESERVATION_CONFLICT (0x18)
240#define MPI2_SCSI_STATUS_COMMAND_TERMINATED (0x22) /* obsolete */
241#define MPI2_SCSI_STATUS_TASK_SET_FULL (0x28)
242#define MPI2_SCSI_STATUS_ACA_ACTIVE (0x30)
243#define MPI2_SCSI_STATUS_TASK_ABORTED (0x40)
244
245/* SCSI IO Reply SCSIState flags */
246
247#define MPI2_SCSI_STATE_RESPONSE_INFO_VALID (0x10)
248#define MPI2_SCSI_STATE_TERMINATED (0x08)
249#define MPI2_SCSI_STATE_NO_SCSI_STATUS (0x04)
250#define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02)
251#define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01)
252
253#define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF)
254
255
256/****************************************************************************
257* SCSI Task Management messages
258****************************************************************************/
259
260/* SCSI Task Management Request Message */
261typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST
262{
263 U16 DevHandle; /* 0x00 */
264 U8 ChainOffset; /* 0x02 */
265 U8 Function; /* 0x03 */
266 U8 Reserved1; /* 0x04 */
267 U8 TaskType; /* 0x05 */
268 U8 Reserved2; /* 0x06 */
269 U8 MsgFlags; /* 0x07 */
270 U8 VP_ID; /* 0x08 */
271 U8 VF_ID; /* 0x09 */
272 U16 Reserved3; /* 0x0A */
273 U8 LUN[8]; /* 0x0C */
274 U32 Reserved4[7]; /* 0x14 */
275 U16 TaskMID; /* 0x30 */
276 U16 Reserved5; /* 0x32 */
277} MPI2_SCSI_TASK_MANAGE_REQUEST,
278 MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REQUEST,
279 Mpi2SCSITaskManagementRequest_t,
280 MPI2_POINTER pMpi2SCSITaskManagementRequest_t;
281
282/* TaskType values */
283
284#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
285#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
286#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
287#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
288#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
289#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
290#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
291#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09)
292#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION (0x0A)
293
294/* MsgFlags bits */
295
296#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18)
297#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00)
298#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08)
299#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10)
300
301#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01)
302
303
304
305/* SCSI Task Management Reply Message */
306typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY
307{
308 U16 DevHandle; /* 0x00 */
309 U8 MsgLength; /* 0x02 */
310 U8 Function; /* 0x03 */
311 U8 ResponseCode; /* 0x04 */
312 U8 TaskType; /* 0x05 */
313 U8 Reserved1; /* 0x06 */
314 U8 MsgFlags; /* 0x07 */
315 U8 VP_ID; /* 0x08 */
316 U8 VF_ID; /* 0x09 */
317 U16 Reserved2; /* 0x0A */
318 U16 Reserved3; /* 0x0C */
319 U16 IOCStatus; /* 0x0E */
320 U32 IOCLogInfo; /* 0x10 */
321 U32 TerminationCount; /* 0x14 */
322} MPI2_SCSI_TASK_MANAGE_REPLY,
323 MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REPLY,
324 Mpi2SCSITaskManagementReply_t, MPI2_POINTER pMpi2SCSIManagementReply_t;
325
326/* ResponseCode values */
327
328#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
329#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
330#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
331#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
332#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
333#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
334#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
335
336
337/****************************************************************************
338* SCSI Enclosure Processor messages
339****************************************************************************/
340
341/* SCSI Enclosure Processor Request Message */
342typedef struct _MPI2_SEP_REQUEST
343{
344 U16 DevHandle; /* 0x00 */
345 U8 ChainOffset; /* 0x02 */
346 U8 Function; /* 0x03 */
347 U8 Action; /* 0x04 */
348 U8 Flags; /* 0x05 */
349 U8 Reserved1; /* 0x06 */
350 U8 MsgFlags; /* 0x07 */
351 U8 VP_ID; /* 0x08 */
352 U8 VF_ID; /* 0x09 */
353 U16 Reserved2; /* 0x0A */
354 U32 SlotStatus; /* 0x0C */
355 U32 Reserved3; /* 0x10 */
356 U32 Reserved4; /* 0x14 */
357 U32 Reserved5; /* 0x18 */
358 U16 Slot; /* 0x1C */
359 U16 EnclosureHandle; /* 0x1E */
360} MPI2_SEP_REQUEST, MPI2_POINTER PTR_MPI2_SEP_REQUEST,
361 Mpi2SepRequest_t, MPI2_POINTER pMpi2SepRequest_t;
362
363/* Action defines */
364#define MPI2_SEP_REQ_ACTION_WRITE_STATUS (0x00)
365#define MPI2_SEP_REQ_ACTION_READ_STATUS (0x01)
366
367/* Flags defines */
368#define MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00)
369#define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01)
370
371/* SlotStatus defines */
372#define MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000)
373#define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
374#define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
375#define MPI2_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100)
376#define MPI2_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080)
377#define MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
378#define MPI2_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004)
379#define MPI2_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002)
380#define MPI2_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001)
381
382
383/* SCSI Enclosure Processor Reply Message */
384typedef struct _MPI2_SEP_REPLY
385{
386 U16 DevHandle; /* 0x00 */
387 U8 MsgLength; /* 0x02 */
388 U8 Function; /* 0x03 */
389 U8 Action; /* 0x04 */
390 U8 Flags; /* 0x05 */
391 U8 Reserved1; /* 0x06 */
392 U8 MsgFlags; /* 0x07 */
393 U8 VP_ID; /* 0x08 */
394 U8 VF_ID; /* 0x09 */
395 U16 Reserved2; /* 0x0A */
396 U16 Reserved3; /* 0x0C */
397 U16 IOCStatus; /* 0x0E */
398 U32 IOCLogInfo; /* 0x10 */
399 U32 SlotStatus; /* 0x14 */
400 U32 Reserved4; /* 0x18 */
401 U16 Slot; /* 0x1C */
402 U16 EnclosureHandle; /* 0x1E */
403} MPI2_SEP_REPLY, MPI2_POINTER PTR_MPI2_SEP_REPLY,
404 Mpi2SepReply_t, MPI2_POINTER pMpi2SepReply_t;
405
406/* SlotStatus defines */
407#define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000)
408#define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
409#define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
410#define MPI2_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100)
411#define MPI2_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080)
412#define MPI2_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
413#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004)
414#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002)
415#define MPI2_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001)
416
417
418#endif
419
420
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
new file mode 100644
index 000000000000..8c5d81870c03
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -0,0 +1,1295 @@
1/*
2 * Copyright (c) 2000-2009 LSI Corporation.
3 *
4 *
5 * Name: mpi2_ioc.h
6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
7 * Creation Date: October 11, 2006
8 *
9 * mpi2_ioc.h Version: 02.00.10
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
17 * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
18 * MaxTargets.
19 * Added TotalImageSize field to FWDownload Request.
20 * Added reserved words to FWUpload Request.
21 * 06-26-07 02.00.02 Added IR Configuration Change List Event.
22 * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
23 * request and replaced it with
24 * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
25 * Replaced the MinReplyQueueDepth field of the IOCFacts
26 * reply with MaxReplyDescriptorPostQueueDepth.
27 * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum
28 * depth for the Reply Descriptor Post Queue.
29 * Added SASAddress field to Initiator Device Table
30 * Overflow Event data.
31 * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
32 * for SAS Initiator Device Status Change Event data.
33 * Modified Reason Code defines for SAS Topology Change
34 * List Event data, including adding a bit for PHY Vacant
35 * status, and adding a mask for the Reason Code.
36 * Added define for
37 * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
38 * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
39 * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
40 * the IOCFacts Reply.
41 * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
42 * Moved MPI2_VERSION_UNION to mpi2.h.
43 * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
44 * instead of enables, and added SASBroadcastPrimitiveMasks
45 * field.
46 * Added Log Entry Added Event and related structure.
47 * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
48 * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
49 * Added MaxVolumes and MaxPersistentEntries fields to
50 * IOCFacts reply.
51 * Added ProtocalFlags and IOCCapabilities fields to
52 * MPI2_FW_IMAGE_HEADER.
53 * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
54 * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
55 * a U16 (from a U32).
56 * Removed extra 's' from EventMasks name.
57 * 06-27-08 02.00.08 Fixed an offset in a comment.
58 * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
59 * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
60 * renamed MinReplyFrameSize to ReplyFrameSize.
61 * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
62 * Added two new RAIDOperation values for Integrated RAID
63 * Operations Status Event data.
64 * Added four new IR Configuration Change List Event data
65 * ReasonCode values.
66 * Added two new ReasonCode defines for SAS Device Status
67 * Change Event data.
68 * Added three new DiscoveryStatus bits for the SAS
69 * Discovery event data.
70 * Added Multiplexing Status Change bit to the PhyStatus
71 * field of the SAS Topology Change List event data.
72 * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
73 * BootFlags are now product-specific.
 74 * Added defines for the individual signature bytes
75 * for MPI2_INIT_IMAGE_FOOTER.
76 * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
77 * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
78 * define.
79 * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
80 * define.
81 * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
82 * --------------------------------------------------------------------------
83 */
84
85#ifndef MPI2_IOC_H
86#define MPI2_IOC_H
87
88/*****************************************************************************
89*
90* IOC Messages
91*
92*****************************************************************************/
93
94/****************************************************************************
95* IOCInit message
96****************************************************************************/
97
98/* IOCInit Request message */
99typedef struct _MPI2_IOC_INIT_REQUEST
100{
101 U8 WhoInit; /* 0x00 */
102 U8 Reserved1; /* 0x01 */
103 U8 ChainOffset; /* 0x02 */
104 U8 Function; /* 0x03 */
105 U16 Reserved2; /* 0x04 */
106 U8 Reserved3; /* 0x06 */
107 U8 MsgFlags; /* 0x07 */
108 U8 VP_ID; /* 0x08 */
109 U8 VF_ID; /* 0x09 */
110 U16 Reserved4; /* 0x0A */
111 U16 MsgVersion; /* 0x0C */
112 U16 HeaderVersion; /* 0x0E */
113 U32 Reserved5; /* 0x10 */
114 U32 Reserved6; /* 0x14 */
115 U16 Reserved7; /* 0x18 */
116 U16 SystemRequestFrameSize; /* 0x1A */
117 U16 ReplyDescriptorPostQueueDepth; /* 0x1C */
118 U16 ReplyFreeQueueDepth; /* 0x1E */
119 U32 SenseBufferAddressHigh; /* 0x20 */
120 U32 SystemReplyAddressHigh; /* 0x24 */
121 U64 SystemRequestFrameBaseAddress; /* 0x28 */
122 U64 ReplyDescriptorPostQueueAddress;/* 0x30 */
123 U64 ReplyFreeQueueAddress; /* 0x38 */
124 U64 TimeStamp; /* 0x40 */
125} MPI2_IOC_INIT_REQUEST, MPI2_POINTER PTR_MPI2_IOC_INIT_REQUEST,
126 Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t;
127
128/* WhoInit values */
129#define MPI2_WHOINIT_NOT_INITIALIZED (0x00)
130#define MPI2_WHOINIT_SYSTEM_BIOS (0x01)
131#define MPI2_WHOINIT_ROM_BIOS (0x02)
132#define MPI2_WHOINIT_PCI_PEER (0x03)
133#define MPI2_WHOINIT_HOST_DRIVER (0x04)
134#define MPI2_WHOINIT_MANUFACTURER (0x05)
135
136/* MsgVersion */
137#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00)
138#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8)
139#define MPI2_IOCINIT_MSGVERSION_MINOR_MASK (0x00FF)
140#define MPI2_IOCINIT_MSGVERSION_MINOR_SHIFT (0)
141
142/* HeaderVersion */
143#define MPI2_IOCINIT_HDRVERSION_UNIT_MASK (0xFF00)
144#define MPI2_IOCINIT_HDRVERSION_UNIT_SHIFT (8)
145#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF)
146#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0)
147
148/* minimum depth for the Reply Descriptor Post Queue */
149#define MPI2_RDPQ_DEPTH_MIN (16)
150
151
152/* IOCInit Reply message */
153typedef struct _MPI2_IOC_INIT_REPLY
154{
155 U8 WhoInit; /* 0x00 */
156 U8 Reserved1; /* 0x01 */
157 U8 MsgLength; /* 0x02 */
158 U8 Function; /* 0x03 */
159 U16 Reserved2; /* 0x04 */
160 U8 Reserved3; /* 0x06 */
161 U8 MsgFlags; /* 0x07 */
162 U8 VP_ID; /* 0x08 */
163 U8 VF_ID; /* 0x09 */
164 U16 Reserved4; /* 0x0A */
165 U16 Reserved5; /* 0x0C */
166 U16 IOCStatus; /* 0x0E */
167 U32 IOCLogInfo; /* 0x10 */
168} MPI2_IOC_INIT_REPLY, MPI2_POINTER PTR_MPI2_IOC_INIT_REPLY,
169 Mpi2IOCInitReply_t, MPI2_POINTER pMpi2IOCInitReply_t;
170
171
172/****************************************************************************
173* IOCFacts message
174****************************************************************************/
175
176/* IOCFacts Request message */
177typedef struct _MPI2_IOC_FACTS_REQUEST
178{
179 U16 Reserved1; /* 0x00 */
180 U8 ChainOffset; /* 0x02 */
181 U8 Function; /* 0x03 */
182 U16 Reserved2; /* 0x04 */
183 U8 Reserved3; /* 0x06 */
184 U8 MsgFlags; /* 0x07 */
185 U8 VP_ID; /* 0x08 */
186 U8 VF_ID; /* 0x09 */
187 U16 Reserved4; /* 0x0A */
188} MPI2_IOC_FACTS_REQUEST, MPI2_POINTER PTR_MPI2_IOC_FACTS_REQUEST,
189 Mpi2IOCFactsRequest_t, MPI2_POINTER pMpi2IOCFactsRequest_t;
190
191
192/* IOCFacts Reply message */
193typedef struct _MPI2_IOC_FACTS_REPLY
194{
195 U16 MsgVersion; /* 0x00 */
196 U8 MsgLength; /* 0x02 */
197 U8 Function; /* 0x03 */
198 U16 HeaderVersion; /* 0x04 */
199 U8 IOCNumber; /* 0x06 */
200 U8 MsgFlags; /* 0x07 */
201 U8 VP_ID; /* 0x08 */
202 U8 VF_ID; /* 0x09 */
203 U16 Reserved1; /* 0x0A */
204 U16 IOCExceptions; /* 0x0C */
205 U16 IOCStatus; /* 0x0E */
206 U32 IOCLogInfo; /* 0x10 */
207 U8 MaxChainDepth; /* 0x14 */
208 U8 WhoInit; /* 0x15 */
209 U8 NumberOfPorts; /* 0x16 */
210 U8 Reserved2; /* 0x17 */
211 U16 RequestCredit; /* 0x18 */
212 U16 ProductID; /* 0x1A */
213 U32 IOCCapabilities; /* 0x1C */
214 MPI2_VERSION_UNION FWVersion; /* 0x20 */
215 U16 IOCRequestFrameSize; /* 0x24 */
216 U16 Reserved3; /* 0x26 */
217 U16 MaxInitiators; /* 0x28 */
218 U16 MaxTargets; /* 0x2A */
219 U16 MaxSasExpanders; /* 0x2C */
220 U16 MaxEnclosures; /* 0x2E */
221 U16 ProtocolFlags; /* 0x30 */
222 U16 HighPriorityCredit; /* 0x32 */
223 U16 MaxReplyDescriptorPostQueueDepth; /* 0x34 */
224 U8 ReplyFrameSize; /* 0x36 */
225 U8 MaxVolumes; /* 0x37 */
226 U16 MaxDevHandle; /* 0x38 */
227 U16 MaxPersistentEntries; /* 0x3A */
228 U32 Reserved4; /* 0x3C */
229} MPI2_IOC_FACTS_REPLY, MPI2_POINTER PTR_MPI2_IOC_FACTS_REPLY,
230 Mpi2IOCFactsReply_t, MPI2_POINTER pMpi2IOCFactsReply_t;
231
232/* MsgVersion */
233#define MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00)
234#define MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT (8)
235#define MPI2_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF)
236#define MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT (0)
237
238/* HeaderVersion */
239#define MPI2_IOCFACTS_HDRVERSION_UNIT_MASK (0xFF00)
240#define MPI2_IOCFACTS_HDRVERSION_UNIT_SHIFT (8)
241#define MPI2_IOCFACTS_HDRVERSION_DEV_MASK (0x00FF)
242#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0)
243
244/* IOCExceptions */
245#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100)
246
247#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x00E0)
248#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_GOOD (0x0000)
249#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_BACKUP (0x0020)
250#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_RESTORED (0x0040)
251#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_CORRUPT_BACKUP (0x0060)
252
253#define MPI2_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED (0x0010)
254#define MPI2_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0008)
255#define MPI2_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0004)
256#define MPI2_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID (0x0002)
257#define MPI2_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001)
258
259/* defines for WhoInit field are after the IOCInit Request */
260
261/* ProductID field uses MPI2_FW_HEADER_PID_ */
262
263/* IOCCapabilities */
264#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000)
265#define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000)
266#define MPI2_IOCFACTS_CAPABILITY_TLR (0x00000800)
267#define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100)
268#define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080)
269#define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040)
270#define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010)
271#define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008)
272#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
273
274/* ProtocolFlags */
275#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
276#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
277
278
279/****************************************************************************
280* PortFacts message
281****************************************************************************/
282
283/* PortFacts Request message */
284typedef struct _MPI2_PORT_FACTS_REQUEST
285{
286 U16 Reserved1; /* 0x00 */
287 U8 ChainOffset; /* 0x02 */
288 U8 Function; /* 0x03 */
289 U16 Reserved2; /* 0x04 */
290 U8 PortNumber; /* 0x06 */
291 U8 MsgFlags; /* 0x07 */
292 U8 VP_ID; /* 0x08 */
293 U8 VF_ID; /* 0x09 */
294 U16 Reserved3; /* 0x0A */
295} MPI2_PORT_FACTS_REQUEST, MPI2_POINTER PTR_MPI2_PORT_FACTS_REQUEST,
296 Mpi2PortFactsRequest_t, MPI2_POINTER pMpi2PortFactsRequest_t;
297
298/* PortFacts Reply message */
299typedef struct _MPI2_PORT_FACTS_REPLY
300{
301 U16 Reserved1; /* 0x00 */
302 U8 MsgLength; /* 0x02 */
303 U8 Function; /* 0x03 */
304 U16 Reserved2; /* 0x04 */
305 U8 PortNumber; /* 0x06 */
306 U8 MsgFlags; /* 0x07 */
307 U8 VP_ID; /* 0x08 */
308 U8 VF_ID; /* 0x09 */
309 U16 Reserved3; /* 0x0A */
310 U16 Reserved4; /* 0x0C */
311 U16 IOCStatus; /* 0x0E */
312 U32 IOCLogInfo; /* 0x10 */
313 U8 Reserved5; /* 0x14 */
314 U8 PortType; /* 0x15 */
315 U16 Reserved6; /* 0x16 */
316 U16 MaxPostedCmdBuffers; /* 0x18 */
317 U16 Reserved7; /* 0x1A */
318} MPI2_PORT_FACTS_REPLY, MPI2_POINTER PTR_MPI2_PORT_FACTS_REPLY,
319 Mpi2PortFactsReply_t, MPI2_POINTER pMpi2PortFactsReply_t;
320
321/* PortType values */
322#define MPI2_PORTFACTS_PORTTYPE_INACTIVE (0x00)
323#define MPI2_PORTFACTS_PORTTYPE_FC (0x10)
324#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20)
325#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30)
326#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31)
327
328
329/****************************************************************************
330* PortEnable message
331****************************************************************************/
332
333/* PortEnable Request message */
334typedef struct _MPI2_PORT_ENABLE_REQUEST
335{
336 U16 Reserved1; /* 0x00 */
337 U8 ChainOffset; /* 0x02 */
338 U8 Function; /* 0x03 */
339 U8 Reserved2; /* 0x04 */
340 U8 PortFlags; /* 0x05 */
341 U8 Reserved3; /* 0x06 */
342 U8 MsgFlags; /* 0x07 */
343 U8 VP_ID; /* 0x08 */
344 U8 VF_ID; /* 0x09 */
345 U16 Reserved4; /* 0x0A */
346} MPI2_PORT_ENABLE_REQUEST, MPI2_POINTER PTR_MPI2_PORT_ENABLE_REQUEST,
347 Mpi2PortEnableRequest_t, MPI2_POINTER pMpi2PortEnableRequest_t;
348
349
350/* PortEnable Reply message */
351typedef struct _MPI2_PORT_ENABLE_REPLY
352{
353 U16 Reserved1; /* 0x00 */
354 U8 MsgLength; /* 0x02 */
355 U8 Function; /* 0x03 */
356 U8 Reserved2; /* 0x04 */
357 U8 PortFlags; /* 0x05 */
358 U8 Reserved3; /* 0x06 */
359 U8 MsgFlags; /* 0x07 */
360 U8 VP_ID; /* 0x08 */
361 U8 VF_ID; /* 0x09 */
362 U16 Reserved4; /* 0x0A */
363 U16 Reserved5; /* 0x0C */
364 U16 IOCStatus; /* 0x0E */
365 U32 IOCLogInfo; /* 0x10 */
366} MPI2_PORT_ENABLE_REPLY, MPI2_POINTER PTR_MPI2_PORT_ENABLE_REPLY,
367 Mpi2PortEnableReply_t, MPI2_POINTER pMpi2PortEnableReply_t;
368
369
370/****************************************************************************
371* EventNotification message
372****************************************************************************/
373
374/* EventNotification Request message */
375#define MPI2_EVENT_NOTIFY_EVENTMASK_WORDS (4)
376
377typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST
378{
379 U16 Reserved1; /* 0x00 */
380 U8 ChainOffset; /* 0x02 */
381 U8 Function; /* 0x03 */
382 U16 Reserved2; /* 0x04 */
383 U8 Reserved3; /* 0x06 */
384 U8 MsgFlags; /* 0x07 */
385 U8 VP_ID; /* 0x08 */
386 U8 VF_ID; /* 0x09 */
387 U16 Reserved4; /* 0x0A */
388 U32 Reserved5; /* 0x0C */
389 U32 Reserved6; /* 0x10 */
390 U32 EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];/* 0x14 */
391 U16 SASBroadcastPrimitiveMasks; /* 0x24 */
392 U16 Reserved7; /* 0x26 */
393 U32 Reserved8; /* 0x28 */
394} MPI2_EVENT_NOTIFICATION_REQUEST,
395 MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REQUEST,
396 Mpi2EventNotificationRequest_t, MPI2_POINTER pMpi2EventNotificationRequest_t;
397
398
399/* EventNotification Reply message */
400typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
401{
402 U16 EventDataLength; /* 0x00 */
403 U8 MsgLength; /* 0x02 */
404 U8 Function; /* 0x03 */
405 U16 Reserved1; /* 0x04 */
406 U8 AckRequired; /* 0x06 */
407 U8 MsgFlags; /* 0x07 */
408 U8 VP_ID; /* 0x08 */
409 U8 VF_ID; /* 0x09 */
410 U16 Reserved2; /* 0x0A */
411 U16 Reserved3; /* 0x0C */
412 U16 IOCStatus; /* 0x0E */
413 U32 IOCLogInfo; /* 0x10 */
414 U16 Event; /* 0x14 */
415 U16 Reserved4; /* 0x16 */
416 U32 EventContext; /* 0x18 */
417 U32 EventData[1]; /* 0x1C */
418} MPI2_EVENT_NOTIFICATION_REPLY, MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REPLY,
419 Mpi2EventNotificationReply_t, MPI2_POINTER pMpi2EventNotificationReply_t;
420
421/* AckRequired */
422#define MPI2_EVENT_NOTIFICATION_ACK_NOT_REQUIRED (0x00)
423#define MPI2_EVENT_NOTIFICATION_ACK_REQUIRED (0x01)
424
425/* Event */
426#define MPI2_EVENT_LOG_DATA (0x0001)
427#define MPI2_EVENT_STATE_CHANGE (0x0002)
428#define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005)
429#define MPI2_EVENT_EVENT_CHANGE (0x000A)
430#define MPI2_EVENT_TASK_SET_FULL (0x000E)
431#define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F)
432#define MPI2_EVENT_IR_OPERATION_STATUS (0x0014)
433#define MPI2_EVENT_SAS_DISCOVERY (0x0016)
434#define MPI2_EVENT_SAS_BROADCAST_PRIMITIVE (0x0017)
435#define MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x0018)
436#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019)
437#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C)
438#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D)
439#define MPI2_EVENT_IR_VOLUME (0x001E)
440#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F)
441#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020)
442#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021)
443
444
/* Log Entry Added Event data */

/* the following structure matches MPI2_LOG_0_ENTRY in mpi2_cnfg.h */
#define MPI2_EVENT_DATA_LOG_DATA_LENGTH             (0x1C)

/* Payload for MPI2_EVENT_LOG_ENTRY_ADDED: one entry appended to the IOC's
 * persistent log page, mirrored here so the host need not re-read the page. */
typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED
{
    U64         TimeStamp;                          /* 0x00 */
    U32         Reserved1;                          /* 0x08 */
    U16         LogSequence;                        /* 0x0C */
    U16         LogEntryQualifier;                  /* 0x0E */
    U8          VP_ID;                              /* 0x10 */
    U8          VF_ID;                              /* 0x11 */
    U16         Reserved2;                          /* 0x12 */
    U8          LogData[MPI2_EVENT_DATA_LOG_DATA_LENGTH];/* 0x14 */
} MPI2_EVENT_DATA_LOG_ENTRY_ADDED,
  MPI2_POINTER PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED,
  Mpi2EventDataLogEntryAdded_t, MPI2_POINTER pMpi2EventDataLogEntryAdded_t;

/* Hard Reset Received Event data */

/* Payload for MPI2_EVENT_HARD_RESET_RECEIVED: identifies the port on which
 * the reset primitive arrived. */
typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED
{
    U8                      Reserved1;                  /* 0x00 */
    U8                      Port;                       /* 0x01 */
    U16                     Reserved2;                  /* 0x02 */
} MPI2_EVENT_DATA_HARD_RESET_RECEIVED,
  MPI2_POINTER PTR_MPI2_EVENT_DATA_HARD_RESET_RECEIVED,
  Mpi2EventDataHardResetReceived_t,
  MPI2_POINTER pMpi2EventDataHardResetReceived_t;

/* Task Set Full Event data */

/* Payload for MPI2_EVENT_TASK_SET_FULL: CurrentDepth is the queue depth in
 * effect for DevHandle when the TASK SET FULL status was returned. */
typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL
{
    U16                     DevHandle;                  /* 0x00 */
    U16                     CurrentDepth;               /* 0x02 */
} MPI2_EVENT_DATA_TASK_SET_FULL, MPI2_POINTER PTR_MPI2_EVENT_DATA_TASK_SET_FULL,
  Mpi2EventDataTaskSetFull_t, MPI2_POINTER pMpi2EventDataTaskSetFull_t;
484
485
/* SAS Device Status Change Event data */

/*
 * Payload for MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE.  ASC/ASCQ carry the SCSI
 * additional sense code pair; TaskTag/LUN qualify the reason codes that are
 * task-management related.  NOTE: SASAddress sits at offset 0x0C, so the U64
 * is only 4-byte aligned — this layout is fixed by the MPI 2.0 wire format.
 */
typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
{
    U16                     TaskTag;                    /* 0x00 */
    U8                      ReasonCode;                 /* 0x02 */
    U8                      Reserved1;                  /* 0x03 */
    U8                      ASC;                        /* 0x04 */
    U8                      ASCQ;                       /* 0x05 */
    U16                     DevHandle;                  /* 0x06 */
    U32                     Reserved2;                  /* 0x08 */
    U64                     SASAddress;                 /* 0x0C */
    U8                      LUN[8];                     /* 0x14 */
} MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
  MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
  Mpi2EventDataSasDeviceStatusChange_t,
  MPI2_POINTER pMpi2EventDataSasDeviceStatusChange_t;

/* SAS Device Status Change Event data ReasonCode values */
#define MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA               (0x05)
#define MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED              (0x07)
#define MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET    (0x08)
#define MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL      (0x09)
#define MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL  (0x0A)
#define MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL  (0x0B)
#define MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL      (0x0C)
#define MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION       (0x0D)
#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET   (0x0E)
#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL  (0x0F)
#define MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE        (0x10)
516
517
518/* Integrated RAID Operation Status Event data */
519
520typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS
521{
522 U16 VolDevHandle; /* 0x00 */
523 U16 Reserved1; /* 0x02 */
524 U8 RAIDOperation; /* 0x04 */
525 U8 PercentComplete; /* 0x05 */
526 U16 Reserved2; /* 0x06 */
527 U32 Resereved3; /* 0x08 */
528} MPI2_EVENT_DATA_IR_OPERATION_STATUS,
529 MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS,
530 Mpi2EventDataIrOperationStatus_t,
531 MPI2_POINTER pMpi2EventDataIrOperationStatus_t;
532
533/* Integrated RAID Operation Status Event data RAIDOperation values */
534#define MPI2_EVENT_IR_RAIDOP_RESYNC (0x00)
535#define MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01)
536#define MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02)
537#define MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03)
538#define MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04)
539
540
/* Integrated RAID Volume Event data */

/* Payload for MPI2_EVENT_IR_VOLUME: NewValue/PreviousValue hold the changed
 * quantity selected by ReasonCode (settings, status flags, or state). */
typedef struct _MPI2_EVENT_DATA_IR_VOLUME
{
    U16                     VolDevHandle;               /* 0x00 */
    U8                      ReasonCode;                 /* 0x02 */
    U8                      Reserved1;                  /* 0x03 */
    U32                     NewValue;                   /* 0x04 */
    U32                     PreviousValue;              /* 0x08 */
} MPI2_EVENT_DATA_IR_VOLUME, MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_VOLUME,
  Mpi2EventDataIrVolume_t, MPI2_POINTER pMpi2EventDataIrVolume_t;

/* Integrated RAID Volume Event data ReasonCode values */
#define MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED        (0x01)
#define MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED    (0x02)
#define MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED           (0x03)


/* Integrated RAID Physical Disk Event data */

/* Payload for MPI2_EVENT_IR_PHYSICAL_DISK: same NewValue/PreviousValue
 * convention as the volume event, for physical disk PhysDiskNum. */
typedef struct _MPI2_EVENT_DATA_IR_PHYSICAL_DISK
{
    U16                     Reserved1;                  /* 0x00 */
    U8                      ReasonCode;                 /* 0x02 */
    U8                      PhysDiskNum;                /* 0x03 */
    U16                     PhysDiskDevHandle;          /* 0x04 */
    U16                     Reserved2;                  /* 0x06 */
    U16                     Slot;                       /* 0x08 */
    U16                     EnclosureHandle;            /* 0x0A */
    U32                     NewValue;                   /* 0x0C */
    U32                     PreviousValue;              /* 0x10 */
} MPI2_EVENT_DATA_IR_PHYSICAL_DISK,
  MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_PHYSICAL_DISK,
  Mpi2EventDataIrPhysicalDisk_t, MPI2_POINTER pMpi2EventDataIrPhysicalDisk_t;

/* Integrated RAID Physical Disk Event data ReasonCode values */
#define MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED      (0x01)
#define MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED  (0x02)
#define MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED         (0x03)


/* Integrated RAID Configuration Change List Event data */

/*
 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
 * one and check NumElements at runtime.
 */
#ifndef MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT
#define MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT          (1)
#endif

/* One entry of the configuration change list below. */
typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT
{
    U16                     ElementFlags;               /* 0x00 */
    U16                     VolDevHandle;               /* 0x02 */
    U8                      ReasonCode;                 /* 0x04 */
    U8                      PhysDiskNum;                /* 0x05 */
    U16                     PhysDiskDevHandle;          /* 0x06 */
} MPI2_EVENT_IR_CONFIG_ELEMENT, MPI2_POINTER PTR_MPI2_EVENT_IR_CONFIG_ELEMENT,
  Mpi2EventIrConfigElement_t, MPI2_POINTER pMpi2EventIrConfigElement_t;

/* IR Configuration Change List Event data ElementFlags values */
#define MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK   (0x000F)
#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT      (0x0000)
#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001)
#define MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT    (0x0002)

/* IR Configuration Change List Event data ReasonCode values */
#define MPI2_EVENT_IR_CHANGE_RC_ADDED                   (0x01)
#define MPI2_EVENT_IR_CHANGE_RC_REMOVED                 (0x02)
#define MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE               (0x03)
#define MPI2_EVENT_IR_CHANGE_RC_HIDE                    (0x04)
#define MPI2_EVENT_IR_CHANGE_RC_UNHIDE                  (0x05)
#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED          (0x06)
#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED          (0x07)
#define MPI2_EVENT_IR_CHANGE_RC_PD_CREATED              (0x08)
#define MPI2_EVENT_IR_CHANGE_RC_PD_DELETED              (0x09)

/* Payload for MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST; ConfigElement[] holds
 * NumElements entries (may exceed the compile-time array bound — see the
 * note above MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT). */
typedef struct _MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST
{
    U8                      NumElements;                /* 0x00 */
    U8                      Reserved1;                  /* 0x01 */
    U8                      Reserved2;                  /* 0x02 */
    U8                      ConfigNum;                  /* 0x03 */
    U32                     Flags;                      /* 0x04 */
    MPI2_EVENT_IR_CONFIG_ELEMENT ConfigElement[MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT];    /* 0x08 */
} MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST,
  MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST,
  Mpi2EventDataIrConfigChangeList_t,
  MPI2_POINTER pMpi2EventDataIrConfigChangeList_t;

/* IR Configuration Change List Event data Flags values */
#define MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG   (0x00000001)
634
635
/* SAS Discovery Event data */

/* Payload for MPI2_EVENT_SAS_DISCOVERY: start/completion of discovery on
 * PhysicalPort; DiscoveryStatus is a bitmask of the DS_* errors below
 * (meaningful on completion). */
typedef struct _MPI2_EVENT_DATA_SAS_DISCOVERY
{
    U8                      Flags;                      /* 0x00 */
    U8                      ReasonCode;                 /* 0x01 */
    U8                      PhysicalPort;               /* 0x02 */
    U8                      Reserved1;                  /* 0x03 */
    U32                     DiscoveryStatus;            /* 0x04 */
} MPI2_EVENT_DATA_SAS_DISCOVERY,
  MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_DISCOVERY,
  Mpi2EventDataSasDiscovery_t, MPI2_POINTER pMpi2EventDataSasDiscovery_t;

/* SAS Discovery Event data Flags values (bitmask) */
#define MPI2_EVENT_SAS_DISC_DEVICE_CHANGE                   (0x02)
#define MPI2_EVENT_SAS_DISC_IN_PROGRESS                     (0x01)

/* SAS Discovery Event data ReasonCode values */
#define MPI2_EVENT_SAS_DISC_RC_STARTED                      (0x01)
#define MPI2_EVENT_SAS_DISC_RC_COMPLETED                    (0x02)

/* SAS Discovery Event data DiscoveryStatus values (bitmask) */
#define MPI2_EVENT_SAS_DISC_DS_MAX_ENCLOSURES_EXCEED            (0x80000000)
#define MPI2_EVENT_SAS_DISC_DS_MAX_EXPANDERS_EXCEED             (0x40000000)
#define MPI2_EVENT_SAS_DISC_DS_MAX_DEVICES_EXCEED               (0x20000000)
#define MPI2_EVENT_SAS_DISC_DS_MAX_TOPO_PHYS_EXCEED             (0x10000000)
#define MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR             (0x08000000)
#define MPI2_EVENT_SAS_DISC_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE    (0x00008000)
#define MPI2_EVENT_SAS_DISC_DS_EXP_MULTI_SUBTRACTIVE            (0x00004000)
#define MPI2_EVENT_SAS_DISC_DS_MULTI_PORT_DOMAIN                (0x00002000)
#define MPI2_EVENT_SAS_DISC_DS_TABLE_TO_SUBTRACTIVE_LINK        (0x00001000)
#define MPI2_EVENT_SAS_DISC_DS_UNSUPPORTED_DEVICE               (0x00000800)
#define MPI2_EVENT_SAS_DISC_DS_TABLE_LINK                       (0x00000400)
#define MPI2_EVENT_SAS_DISC_DS_SUBTRACTIVE_LINK                 (0x00000200)
#define MPI2_EVENT_SAS_DISC_DS_SMP_CRC_ERROR                    (0x00000100)
#define MPI2_EVENT_SAS_DISC_DS_SMP_FUNCTION_FAILED              (0x00000080)
#define MPI2_EVENT_SAS_DISC_DS_INDEX_NOT_EXIST                  (0x00000040)
#define MPI2_EVENT_SAS_DISC_DS_OUT_ROUTE_ENTRIES                (0x00000020)
#define MPI2_EVENT_SAS_DISC_DS_SMP_TIMEOUT                      (0x00000010)
#define MPI2_EVENT_SAS_DISC_DS_MULTIPLE_PORTS                   (0x00000004)
#define MPI2_EVENT_SAS_DISC_DS_UNADDRESSABLE_DEVICE             (0x00000002)
#define MPI2_EVENT_SAS_DISC_DS_LOOP_DETECTED                    (0x00000001)


/* SAS Broadcast Primitive Event data */

/* Payload for MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: identifies the phy/port
 * on which the broadcast primitive was received. */
typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE
{
    U8                      PhyNum;                     /* 0x00 */
    U8                      Port;                       /* 0x01 */
    U8                      PortWidth;                  /* 0x02 */
    U8                      Primitive;                  /* 0x03 */
} MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
  MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
  Mpi2EventDataSasBroadcastPrimitive_t,
  MPI2_POINTER pMpi2EventDataSasBroadcastPrimitive_t;

/* defines for the Primitive field */
#define MPI2_EVENT_PRIMITIVE_CHANGE                         (0x01)
#define MPI2_EVENT_PRIMITIVE_SES                            (0x02)
#define MPI2_EVENT_PRIMITIVE_EXPANDER                       (0x03)
#define MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT             (0x04)
#define MPI2_EVENT_PRIMITIVE_RESERVED3                      (0x05)
#define MPI2_EVENT_PRIMITIVE_RESERVED4                      (0x06)
#define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED               (0x07)
#define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED               (0x08)
702
703
/* SAS Initiator Device Status Change Event data */

/* Payload for MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE.  NOTE: SASAddress
 * is at offset 0x04, i.e. only 4-byte aligned — layout fixed by the spec. */
typedef struct _MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE
{
    U8                      ReasonCode;                 /* 0x00 */
    U8                      PhysicalPort;               /* 0x01 */
    U16                     DevHandle;                  /* 0x02 */
    U64                     SASAddress;                 /* 0x04 */
} MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
  MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
  Mpi2EventDataSasInitDevStatusChange_t,
  MPI2_POINTER pMpi2EventDataSasInitDevStatusChange_t;

/* SAS Initiator Device Status Change event ReasonCode values */
#define MPI2_EVENT_SAS_INIT_RC_ADDED                (0x01)
#define MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING       (0x02)


/* SAS Initiator Device Table Overflow Event data */

/* Payload for MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW: the initiator at
 * SASAddress could not be added because the table limit MaxInit was hit. */
typedef struct _MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW
{
    U16                     MaxInit;                    /* 0x00 */
    U16                     CurrentInit;                /* 0x02 */
    U64                     SASAddress;                 /* 0x04 */
} MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
  MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
  Mpi2EventDataSasInitTableOverflow_t,
  MPI2_POINTER pMpi2EventDataSasInitTableOverflow_t;
733
734
/* SAS Topology Change List Event data */

/*
 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
 * one and check NumEntries at runtime.
 */
#ifndef MPI2_EVENT_SAS_TOPO_PHY_COUNT
#define MPI2_EVENT_SAS_TOPO_PHY_COUNT           (1)
#endif

/* Per-phy entry of the topology change list; LinkRate packs current and
 * previous rates (see the LR_ masks below), PhyStatus packs flag bits and a
 * ReasonCode sub-field (see the PS_/RC_ defines below). */
typedef struct _MPI2_EVENT_SAS_TOPO_PHY_ENTRY
{
    U16                     AttachedDevHandle;          /* 0x00 */
    U8                      LinkRate;                   /* 0x02 */
    U8                      PhyStatus;                  /* 0x03 */
} MPI2_EVENT_SAS_TOPO_PHY_ENTRY, MPI2_POINTER PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY,
  Mpi2EventSasTopoPhyEntry_t, MPI2_POINTER pMpi2EventSasTopoPhyEntry_t;

/* Payload for MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: PHY[] holds NumEntries
 * entries covering phys StartPhyNum..StartPhyNum+NumEntries-1 of the
 * expander (or of the IOC itself when ExpanderDevHandle is the IOC's). */
typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST
{
    U16                     EnclosureHandle;            /* 0x00 */
    U16                     ExpanderDevHandle;          /* 0x02 */
    U8                      NumPhys;                    /* 0x04 */
    U8                      Reserved1;                  /* 0x05 */
    U16                     Reserved2;                  /* 0x06 */
    U8                      NumEntries;                 /* 0x08 */
    U8                      StartPhyNum;                /* 0x09 */
    U8                      ExpStatus;                  /* 0x0A */
    U8                      PhysicalPort;               /* 0x0B */
    MPI2_EVENT_SAS_TOPO_PHY_ENTRY PHY[MPI2_EVENT_SAS_TOPO_PHY_COUNT]; /* 0x0C */
} MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST,
  MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST,
  Mpi2EventDataSasTopologyChangeList_t,
  MPI2_POINTER pMpi2EventDataSasTopologyChangeList_t;

/* values for the ExpStatus field */
#define MPI2_EVENT_SAS_TOPO_ES_ADDED                    (0x01)
#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING           (0x02)
#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING               (0x03)
#define MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING     (0x04)

/* defines for the LinkRate field: high nibble = current, low = previous */
#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK             (0xF0)
#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT            (4)
#define MPI2_EVENT_SAS_TOPO_LR_PREV_MASK                (0x0F)
#define MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT               (0)

#define MPI2_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE        (0x00)
#define MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED             (0x01)
#define MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED       (0x02)
#define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE        (0x03)
#define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR            (0x04)
#define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS    (0x05)
#define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5                 (0x08)
#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0                 (0x09)
#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0                 (0x0A)

/* values for the PhyStatus field */
#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT            (0x80)
#define MPI2_EVENT_SAS_TOPO_PS_MULTIPLEX_CHANGE         (0x10)
/* values for the PhyStatus ReasonCode sub-field */
#define MPI2_EVENT_SAS_TOPO_RC_MASK                     (0x0F)
#define MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED               (0x01)
#define MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING      (0x02)
#define MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED              (0x03)
#define MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE                (0x04)
#define MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING     (0x05)


/* SAS Enclosure Device Status Change Event data */

/* Payload for MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: PhyBits flags the
 * phys associated with the enclosure's slots NumSlots/StartSlot. */
typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE
{
    U16                     EnclosureHandle;            /* 0x00 */
    U8                      ReasonCode;                 /* 0x02 */
    U8                      PhysicalPort;               /* 0x03 */
    U64                     EnclosureLogicalID;         /* 0x04 */
    U16                     NumSlots;                   /* 0x0C */
    U16                     StartSlot;                  /* 0x0E */
    U32                     PhyBits;                    /* 0x10 */
} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
  MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
  Mpi2EventDataSasEnclDevStatusChange_t,
  MPI2_POINTER pMpi2EventDataSasEnclDevStatusChange_t;

/* SAS Enclosure Device Status Change event ReasonCode values */
#define MPI2_EVENT_SAS_ENCL_RC_ADDED                (0x01)
#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING       (0x02)
823
824
/****************************************************************************
*  EventAck message
****************************************************************************/

/* EventAck Request message */
/* Sent by the host to acknowledge an event reply that had AckRequired set;
 * Event and EventContext must echo the values from the event being acked. */
typedef struct _MPI2_EVENT_ACK_REQUEST
{
    U16                     Reserved1;                  /* 0x00 */
    U8                      ChainOffset;                /* 0x02 */
    U8                      Function;                   /* 0x03 */
    U16                     Reserved2;                  /* 0x04 */
    U8                      Reserved3;                  /* 0x06 */
    U8                      MsgFlags;                   /* 0x07 */
    U8                      VP_ID;                      /* 0x08 */
    U8                      VF_ID;                      /* 0x09 */
    U16                     Reserved4;                  /* 0x0A */
    U16                     Event;                      /* 0x0C */
    U16                     Reserved5;                  /* 0x0E */
    U32                     EventContext;               /* 0x10 */
} MPI2_EVENT_ACK_REQUEST, MPI2_POINTER PTR_MPI2_EVENT_ACK_REQUEST,
  Mpi2EventAckRequest_t, MPI2_POINTER pMpi2EventAckRequest_t;


/* EventAck Reply message */
/* Standard MPI reply header with no message-specific fields. */
typedef struct _MPI2_EVENT_ACK_REPLY
{
    U16                     Reserved1;                  /* 0x00 */
    U8                      MsgLength;                  /* 0x02 */
    U8                      Function;                   /* 0x03 */
    U16                     Reserved2;                  /* 0x04 */
    U8                      Reserved3;                  /* 0x06 */
    U8                      MsgFlags;                   /* 0x07 */
    U8                      VP_ID;                      /* 0x08 */
    U8                      VF_ID;                      /* 0x09 */
    U16                     Reserved4;                  /* 0x0A */
    U16                     Reserved5;                  /* 0x0C */
    U16                     IOCStatus;                  /* 0x0E */
    U32                     IOCLogInfo;                 /* 0x10 */
} MPI2_EVENT_ACK_REPLY, MPI2_POINTER PTR_MPI2_EVENT_ACK_REPLY,
  Mpi2EventAckReply_t, MPI2_POINTER pMpi2EventAckReply_t;
865
866
/****************************************************************************
*  FWDownload message
****************************************************************************/

/* FWDownload Request message */
/* Downloads an image of TotalImageSize bytes to the IOC; segment position
 * and length travel in the transaction-context element (TCSGE) that leads
 * the SGL.  MsgFlags LAST_SEGMENT marks the final segment. */
typedef struct _MPI2_FW_DOWNLOAD_REQUEST
{
    U8                      ImageType;                  /* 0x00 */
    U8                      Reserved1;                  /* 0x01 */
    U8                      ChainOffset;                /* 0x02 */
    U8                      Function;                   /* 0x03 */
    U16                     Reserved2;                  /* 0x04 */
    U8                      Reserved3;                  /* 0x06 */
    U8                      MsgFlags;                   /* 0x07 */
    U8                      VP_ID;                      /* 0x08 */
    U8                      VF_ID;                      /* 0x09 */
    U16                     Reserved4;                  /* 0x0A */
    U32                     TotalImageSize;             /* 0x0C */
    U32                     Reserved5;                  /* 0x10 */
    MPI2_MPI_SGE_UNION      SGL;                        /* 0x14 */
} MPI2_FW_DOWNLOAD_REQUEST, MPI2_POINTER PTR_MPI2_FW_DOWNLOAD_REQUEST,
  Mpi2FWDownloadRequest, MPI2_POINTER pMpi2FWDownloadRequest;

#define MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT   (0x01)

/* ImageType values */
#define MPI2_FW_DOWNLOAD_ITYPE_FW                   (0x01)
#define MPI2_FW_DOWNLOAD_ITYPE_BIOS                 (0x02)
#define MPI2_FW_DOWNLOAD_ITYPE_MANUFACTURING        (0x06)
#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_1             (0x07)
#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_2             (0x08)
#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID             (0x09)
#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK    (0x0B)

/* FWDownload TransactionContext Element */
/* Leads the request's SGL; gives the byte offset/size of this segment
 * within the full image. */
typedef struct _MPI2_FW_DOWNLOAD_TCSGE
{
    U8                      Reserved1;                  /* 0x00 */
    U8                      ContextSize;                /* 0x01 */
    U8                      DetailsLength;              /* 0x02 */
    U8                      Flags;                      /* 0x03 */
    U32                     Reserved2;                  /* 0x04 */
    U32                     ImageOffset;                /* 0x08 */
    U32                     ImageSize;                  /* 0x0C */
} MPI2_FW_DOWNLOAD_TCSGE, MPI2_POINTER PTR_MPI2_FW_DOWNLOAD_TCSGE,
  Mpi2FWDownloadTCSGE_t, MPI2_POINTER pMpi2FWDownloadTCSGE_t;

/* FWDownload Reply message */
typedef struct _MPI2_FW_DOWNLOAD_REPLY
{
    U8                      ImageType;                  /* 0x00 */
    U8                      Reserved1;                  /* 0x01 */
    U8                      MsgLength;                  /* 0x02 */
    U8                      Function;                   /* 0x03 */
    U16                     Reserved2;                  /* 0x04 */
    U8                      Reserved3;                  /* 0x06 */
    U8                      MsgFlags;                   /* 0x07 */
    U8                      VP_ID;                      /* 0x08 */
    U8                      VF_ID;                      /* 0x09 */
    U16                     Reserved4;                  /* 0x0A */
    U16                     Reserved5;                  /* 0x0C */
    U16                     IOCStatus;                  /* 0x0E */
    U32                     IOCLogInfo;                 /* 0x10 */
} MPI2_FW_DOWNLOAD_REPLY, MPI2_POINTER PTR_MPI2_FW_DOWNLOAD_REPLY,
  Mpi2FWDownloadReply_t, MPI2_POINTER pMpi2FWDownloadReply_t;
931
932
/****************************************************************************
*  FWUpload message
****************************************************************************/

/* FWUpload Request message */
/* Reads an image back from the IOC; like FWDownload, the segment window is
 * carried in the TCSGE at the head of the SGL. */
typedef struct _MPI2_FW_UPLOAD_REQUEST
{
    U8                      ImageType;                  /* 0x00 */
    U8                      Reserved1;                  /* 0x01 */
    U8                      ChainOffset;                /* 0x02 */
    U8                      Function;                   /* 0x03 */
    U16                     Reserved2;                  /* 0x04 */
    U8                      Reserved3;                  /* 0x06 */
    U8                      MsgFlags;                   /* 0x07 */
    U8                      VP_ID;                      /* 0x08 */
    U8                      VF_ID;                      /* 0x09 */
    U16                     Reserved4;                  /* 0x0A */
    U32                     Reserved5;                  /* 0x0C */
    U32                     Reserved6;                  /* 0x10 */
    MPI2_MPI_SGE_UNION      SGL;                        /* 0x14 */
} MPI2_FW_UPLOAD_REQUEST, MPI2_POINTER PTR_MPI2_FW_UPLOAD_REQUEST,
  Mpi2FWUploadRequest_t, MPI2_POINTER pMpi2FWUploadRequest_t;

/* ImageType values */
#define MPI2_FW_UPLOAD_ITYPE_FW_CURRENT         (0x00)
#define MPI2_FW_UPLOAD_ITYPE_FW_FLASH           (0x01)
#define MPI2_FW_UPLOAD_ITYPE_BIOS_FLASH         (0x02)
#define MPI2_FW_UPLOAD_ITYPE_FW_BACKUP          (0x05)
#define MPI2_FW_UPLOAD_ITYPE_MANUFACTURING      (0x06)
#define MPI2_FW_UPLOAD_ITYPE_CONFIG_1           (0x07)
#define MPI2_FW_UPLOAD_ITYPE_CONFIG_2           (0x08)
#define MPI2_FW_UPLOAD_ITYPE_MEGARAID           (0x09)
#define MPI2_FW_UPLOAD_ITYPE_COMPLETE           (0x0A)
#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK  (0x0B)

/* FWUpload TransactionContext Element (same layout as the download TCSGE) */
typedef struct _MPI2_FW_UPLOAD_TCSGE
{
    U8                      Reserved1;                  /* 0x00 */
    U8                      ContextSize;                /* 0x01 */
    U8                      DetailsLength;              /* 0x02 */
    U8                      Flags;                      /* 0x03 */
    U32                     Reserved2;                  /* 0x04 */
    U32                     ImageOffset;                /* 0x08 */
    U32                     ImageSize;                  /* 0x0C */
} MPI2_FW_UPLOAD_TCSGE, MPI2_POINTER PTR_MPI2_FW_UPLOAD_TCSGE,
  Mpi2FWUploadTCSGE_t, MPI2_POINTER pMpi2FWUploadTCSGE_t;
978
979/* FWUpload Reply message */
980typedef struct _MPI2_FW_UPLOAD_REPLY
981{
982 U8 ImageType; /* 0x00 */
983 U8 Reserved1; /* 0x01 */
984 U8 MsgLength; /* 0x02 */
985 U8 Function; /* 0x03 */
986 U16 Reserved2; /* 0x04 */
987 U8 Reserved3; /* 0x06 */
988 U8 MsgFlags; /* 0x07 */
989 U8 VP_ID; /* 0x08 */
990 U8 VF_ID; /* 0x09 */
991 U16 Reserved4; /* 0x0A */
992 U16 Reserved5; /* 0x0C */
993 U16 IOCStatus; /* 0x0E */
994 U32 IOCLogInfo; /* 0x10 */
995 U32 ActualImageSize; /* 0x14 */
996} MPI2_FW_UPLOAD_REPLY, MPI2_POINTER PTR_MPI2_FW_UPLOAD_REPLY,
997 Mpi2FWUploadReply_t, MPI2_POINTER pMPi2FWUploadReply_t;
998
999
/* FW Image Header */
/* 0x100-byte header at the front of a firmware image (MPI2_FW_HEADER_SIZE).
 * The three "What" fields precede what(1)-style version strings embedded in
 * the image. */
typedef struct _MPI2_FW_IMAGE_HEADER
{
    U32                     Signature;                  /* 0x00 */
    U32                     Signature0;                 /* 0x04 */
    U32                     Signature1;                 /* 0x08 */
    U32                     Signature2;                 /* 0x0C */
    MPI2_VERSION_UNION      MPIVersion;                 /* 0x10 */
    MPI2_VERSION_UNION      FWVersion;                  /* 0x14 */
    MPI2_VERSION_UNION      NVDATAVersion;              /* 0x18 */
    MPI2_VERSION_UNION      PackageVersion;             /* 0x1C */
    U16                     VendorID;                   /* 0x20 */
    U16                     ProductID;                  /* 0x22 */
    U16                     ProtocolFlags;              /* 0x24 */
    U16                     Reserved26;                 /* 0x26 */
    U32                     IOCCapabilities;            /* 0x28 */
    U32                     ImageSize;                  /* 0x2C */
    U32                     NextImageHeaderOffset;      /* 0x30 */
    U32                     Checksum;                   /* 0x34 */
    U32                     Reserved38;                 /* 0x38 */
    U32                     Reserved3C;                 /* 0x3C */
    U32                     Reserved40;                 /* 0x40 */
    U32                     Reserved44;                 /* 0x44 */
    U32                     Reserved48;                 /* 0x48 */
    U32                     Reserved4C;                 /* 0x4C */
    U32                     Reserved50;                 /* 0x50 */
    U32                     Reserved54;                 /* 0x54 */
    U32                     Reserved58;                 /* 0x58 */
    U32                     Reserved5C;                 /* 0x5C */
    U32                     Reserved60;                 /* 0x60 */
    U32                     FirmwareVersionNameWhat;    /* 0x64 */
    U8                      FirmwareVersionName[32];    /* 0x68 */
    U32                     VendorNameWhat;             /* 0x88 */
    U8                      VendorName[32];             /* 0x8C */
    /* NOTE(review): the next two offset comments originally read 0x88/0x8C,
     * duplicating VendorNameWhat/VendorName; by field arithmetic they are
     * 0xAC/0xB0 (VendorName[32] ends at 0xAC, PackageName[32] ends at 0xD0,
     * matching ReservedD0 below). */
    U32                     PackageNameWhat;            /* 0xAC */
    U8                      PackageName[32];            /* 0xB0 */
    U32                     ReservedD0;                 /* 0xD0 */
    U32                     ReservedD4;                 /* 0xD4 */
    U32                     ReservedD8;                 /* 0xD8 */
    U32                     ReservedDC;                 /* 0xDC */
    U32                     ReservedE0;                 /* 0xE0 */
    U32                     ReservedE4;                 /* 0xE4 */
    U32                     ReservedE8;                 /* 0xE8 */
    U32                     ReservedEC;                 /* 0xEC */
    U32                     ReservedF0;                 /* 0xF0 */
    U32                     ReservedF4;                 /* 0xF4 */
    U32                     ReservedF8;                 /* 0xF8 */
    U32                     ReservedFC;                 /* 0xFC */
} MPI2_FW_IMAGE_HEADER, MPI2_POINTER PTR_MPI2_FW_IMAGE_HEADER,
  Mpi2FWImageHeader_t, MPI2_POINTER pMpi2FWImageHeader_t;

/* Signature field */
#define MPI2_FW_HEADER_SIGNATURE_OFFSET         (0x00)
#define MPI2_FW_HEADER_SIGNATURE_MASK           (0xFF000000)
#define MPI2_FW_HEADER_SIGNATURE                (0xEA000000)

/* Signature0 field */
#define MPI2_FW_HEADER_SIGNATURE0_OFFSET        (0x04)
#define MPI2_FW_HEADER_SIGNATURE0               (0x5AFAA55A)

/* Signature1 field */
#define MPI2_FW_HEADER_SIGNATURE1_OFFSET        (0x08)
#define MPI2_FW_HEADER_SIGNATURE1               (0xA55AFAA5)

/* Signature2 field */
#define MPI2_FW_HEADER_SIGNATURE2_OFFSET        (0x0C)
#define MPI2_FW_HEADER_SIGNATURE2               (0x5AA55AFA)


/* defines for using the ProductID field */
#define MPI2_FW_HEADER_PID_TYPE_MASK            (0xF000)
#define MPI2_FW_HEADER_PID_TYPE_SAS             (0x2000)

#define MPI2_FW_HEADER_PID_PROD_MASK            (0x0F00)
#define MPI2_FW_HEADER_PID_PROD_A               (0x0000)

#define MPI2_FW_HEADER_PID_FAMILY_MASK          (0x00FF)
/* SAS */
#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS      (0x0010)

/* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */

/* use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */


#define MPI2_FW_HEADER_IMAGESIZE_OFFSET         (0x2C)
#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET         (0x30)
#define MPI2_FW_HEADER_VERNMHWAT_OFFSET         (0x64)

#define MPI2_FW_HEADER_WHAT_SIGNATURE           (0x29232840)

#define MPI2_FW_HEADER_SIZE                     (0x100)
1092
1093
/* Extended Image Header */
/* 0x40-byte header (MPI2_EXT_IMAGE_HEADER_SIZE) for images chained after
 * the main FW image via NextImageHeaderOffset. */
typedef struct _MPI2_EXT_IMAGE_HEADER

{
    U8                      ImageType;                  /* 0x00 */
    U8                      Reserved1;                  /* 0x01 */
    U16                     Reserved2;                  /* 0x02 */
    U32                     Checksum;                   /* 0x04 */
    U32                     ImageSize;                  /* 0x08 */
    U32                     NextImageHeaderOffset;      /* 0x0C */
    U32                     PackageVersion;             /* 0x10 */
    U32                     Reserved3;                  /* 0x14 */
    U32                     Reserved4;                  /* 0x18 */
    U32                     Reserved5;                  /* 0x1C */
    U8                      IdentifyString[32];         /* 0x20 */
} MPI2_EXT_IMAGE_HEADER, MPI2_POINTER PTR_MPI2_EXT_IMAGE_HEADER,
  Mpi2ExtImageHeader_t, MPI2_POINTER pMpi2ExtImageHeader_t;

/* useful offsets */
#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET         (0x00)
#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET         (0x08)
#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET         (0x0C)

#define MPI2_EXT_IMAGE_HEADER_SIZE              (0x40)

/* defines for the ImageType field */
#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED             (0x00)
#define MPI2_EXT_IMAGE_TYPE_FW                      (0x01)
#define MPI2_EXT_IMAGE_TYPE_NVDATA                  (0x03)
#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER              (0x04)
#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION          (0x05)
#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT            (0x06)
#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES       (0x07)
#define MPI2_EXT_IMAGE_TYPE_MEGARAID                (0x08)

#define MPI2_EXT_IMAGE_TYPE_MAX                     (MPI2_EXT_IMAGE_TYPE_MEGARAID)
1130
1131
1132
/* FLASH Layout Extended Image Data */

/*
 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
 * one and check RegionsPerLayout at runtime.
 */
#ifndef MPI2_FLASH_NUMBER_OF_REGIONS
#define MPI2_FLASH_NUMBER_OF_REGIONS        (1)
#endif

/*
 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
 * one and check NumberOfLayouts at runtime.
 */
#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS
#define MPI2_FLASH_NUMBER_OF_LAYOUTS        (1)
#endif

/* One flash region descriptor; RegionType is one of the
 * MPI2_FLASH_REGION_* values below. */
typedef struct _MPI2_FLASH_REGION
{
    U8                      RegionType;                 /* 0x00 */
    U8                      Reserved1;                  /* 0x01 */
    U16                     Reserved2;                  /* 0x02 */
    U32                     RegionOffset;               /* 0x04 */
    U32                     RegionSize;                 /* 0x08 */
    U32                     Reserved3;                  /* 0x0C */
} MPI2_FLASH_REGION, MPI2_POINTER PTR_MPI2_FLASH_REGION,
  Mpi2FlashRegion_t, MPI2_POINTER pMpi2FlashRegion_t;

/* A flash layout: total size plus its region table (RegionsPerLayout
 * entries at runtime — see the note above). */
typedef struct _MPI2_FLASH_LAYOUT
{
    U32                     FlashSize;                  /* 0x00 */
    U32                     Reserved1;                  /* 0x04 */
    U32                     Reserved2;                  /* 0x08 */
    U32                     Reserved3;                  /* 0x0C */
    MPI2_FLASH_REGION       Region[MPI2_FLASH_NUMBER_OF_REGIONS];/* 0x10 */
} MPI2_FLASH_LAYOUT, MPI2_POINTER PTR_MPI2_FLASH_LAYOUT,
  Mpi2FlashLayout_t, MPI2_POINTER pMpi2FlashLayout_t;

/* Top-level payload of a FLASH_LAYOUT extended image; SizeOfRegion is the
 * size of each MPI2_FLASH_REGION entry so hosts can step through layouts
 * built against a newer (larger) region definition. */
typedef struct _MPI2_FLASH_LAYOUT_DATA
{
    U8                      ImageRevision;              /* 0x00 */
    U8                      Reserved1;                  /* 0x01 */
    U8                      SizeOfRegion;               /* 0x02 */
    U8                      Reserved2;                  /* 0x03 */
    U16                     NumberOfLayouts;            /* 0x04 */
    U16                     RegionsPerLayout;           /* 0x06 */
    U16                     MinimumSectorAlignment;     /* 0x08 */
    U16                     Reserved3;                  /* 0x0A */
    U32                     Reserved4;                  /* 0x0C */
    MPI2_FLASH_LAYOUT       Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS];/* 0x10 */
} MPI2_FLASH_LAYOUT_DATA, MPI2_POINTER PTR_MPI2_FLASH_LAYOUT_DATA,
  Mpi2FlashLayoutData_t, MPI2_POINTER pMpi2FlashLayoutData_t;

/* defines for the RegionType field */
#define MPI2_FLASH_REGION_UNUSED                (0x00)
#define MPI2_FLASH_REGION_FIRMWARE              (0x01)
#define MPI2_FLASH_REGION_BIOS                  (0x02)
#define MPI2_FLASH_REGION_NVDATA                (0x03)
#define MPI2_FLASH_REGION_FIRMWARE_BACKUP       (0x05)
#define MPI2_FLASH_REGION_MFG_INFORMATION       (0x06)
#define MPI2_FLASH_REGION_CONFIG_1              (0x07)
#define MPI2_FLASH_REGION_CONFIG_2              (0x08)
#define MPI2_FLASH_REGION_MEGARAID              (0x09)
#define MPI2_FLASH_REGION_INIT                  (0x0A)

/* ImageRevision */
#define MPI2_FLASH_LAYOUT_IMAGE_REVISION        (0x00)
1201
1202
1203
/* Supported Devices Extended Image Data */

/*
 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
 * one and check NumberOfDevices at runtime.
 */
#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES
#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES    (1)
#endif

/* One supported PCI device: VendorID/DeviceID matched under DeviceIDMask,
 * with an inclusive PCI revision range. */
typedef struct _MPI2_SUPPORTED_DEVICE
{
    U16                     DeviceID;                   /* 0x00 */
    U16                     VendorID;                   /* 0x02 */
    U16                     DeviceIDMask;               /* 0x04 */
    U16                     Reserved1;                  /* 0x06 */
    U8                      LowPCIRev;                  /* 0x08 */
    U8                      HighPCIRev;                 /* 0x09 */
    U16                     Reserved2;                  /* 0x0A */
    U32                     Reserved3;                  /* 0x0C */
} MPI2_SUPPORTED_DEVICE, MPI2_POINTER PTR_MPI2_SUPPORTED_DEVICE,
  Mpi2SupportedDevice_t, MPI2_POINTER pMpi2SupportedDevice_t;

/* Payload of a SUPPORTED_DEVICES extended image; SupportedDevice[] holds
 * NumberOfDevices entries (see the note above the element count define). */
typedef struct _MPI2_SUPPORTED_DEVICES_DATA
{
    U8                      ImageRevision;              /* 0x00 */
    U8                      Reserved1;                  /* 0x01 */
    U8                      NumberOfDevices;            /* 0x02 */
    U8                      Reserved2;                  /* 0x03 */
    U32                     Reserved3;                  /* 0x04 */
    MPI2_SUPPORTED_DEVICE   SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES]; /* 0x08 */
} MPI2_SUPPORTED_DEVICES_DATA, MPI2_POINTER PTR_MPI2_SUPPORTED_DEVICES_DATA,
  Mpi2SupportedDevicesData_t, MPI2_POINTER pMpi2SupportedDevicesData_t;

/* ImageRevision */
#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION   (0x00)
1240
1241
/* Init Extended Image Data */

/* Footer of an INITIALIZATION extended image; the three signature words are
 * byte-rotations of the 0xEA/0x5A/0xA5 pattern (see the per-byte defines
 * below). */
typedef struct _MPI2_INIT_IMAGE_FOOTER

{
    U32                     BootFlags;                  /* 0x00 */
    U32                     ImageSize;                  /* 0x04 */
    U32                     Signature0;                 /* 0x08 */
    U32                     Signature1;                 /* 0x0C */
    U32                     Signature2;                 /* 0x10 */
    U32                     ResetVector;                /* 0x14 */
} MPI2_INIT_IMAGE_FOOTER, MPI2_POINTER PTR_MPI2_INIT_IMAGE_FOOTER,
  Mpi2InitImageFooter_t, MPI2_POINTER pMpi2InitImageFooter_t;

/* defines for the BootFlags field */
#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET        (0x00)

/* defines for the ImageSize field */
#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET        (0x04)

/* defines for the Signature0 field */
#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET       (0x08)
#define MPI2_INIT_IMAGE_SIGNATURE0              (0x5AA55AEA)

/* defines for the Signature1 field */
#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET       (0x0C)
#define MPI2_INIT_IMAGE_SIGNATURE1              (0xA55AEAA5)

/* defines for the Signature2 field */
#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET       (0x10)
#define MPI2_INIT_IMAGE_SIGNATURE2              (0x5AEAA55A)

/* Signature fields as individual bytes (little-endian byte order of the
 * words above — confirm endianness against the MPI 2.0 spec) */
#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0        (0xEA)
#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1        (0x5A)
#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2        (0xA5)
#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3        (0x5A)

#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4        (0xA5)
#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5        (0xEA)
#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6        (0x5A)
#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7        (0xA5)

#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8        (0x5A)
#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9        (0xA5)
#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A        (0xEA)
#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B        (0x5A)

/* defines for the ResetVector field */
#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET      (0x14)
1292
1293
1294#endif
1295
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
new file mode 100644
index 000000000000..7134816d9046
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -0,0 +1,295 @@
1/*
2 * Copyright (c) 2000-2008 LSI Corporation.
3 *
4 *
5 * Name: mpi2_raid.h
6 * Title: MPI Integrated RAID messages and structures
7 * Creation Date: April 26, 2007
8 *
9 * mpi2_raid.h Version: 02.00.03
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
17 * 08-31-07 02.00.01 Modifications to RAID Action request and reply,
18 * including the Actions and ActionData.
19 * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD.
20 * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
21 * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
22 * can be sized by the build environment.
23 * --------------------------------------------------------------------------
24 */
25
26#ifndef MPI2_RAID_H
27#define MPI2_RAID_H
28
29/*****************************************************************************
30*
31* Integrated RAID Messages
32*
33*****************************************************************************/
34
35/****************************************************************************
36* RAID Action messages
37****************************************************************************/
38
39/* ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */
40#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000)
41#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001)
42
43/* use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */
44
45/* ActionDataWord defines for use with MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES action */
46#define MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD (0x00000001)
47
48/* ActionDataWord for MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE Action */
49typedef struct _MPI2_RAID_ACTION_RATE_DATA
50{
51 U8 RateToChange; /* 0x00 */
52 U8 RateOrMode; /* 0x01 */
53 U16 DataScrubDuration; /* 0x02 */
54} MPI2_RAID_ACTION_RATE_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_RATE_DATA,
55 Mpi2RaidActionRateData_t, MPI2_POINTER pMpi2RaidActionRateData_t;
56
57#define MPI2_RAID_ACTION_SET_RATE_RESYNC (0x00)
58#define MPI2_RAID_ACTION_SET_RATE_DATA_SCRUB (0x01)
59#define MPI2_RAID_ACTION_SET_RATE_POWERSAVE_MODE (0x02)
60
61/* ActionDataWord for MPI2_RAID_ACTION_START_RAID_FUNCTION Action */
/* Inline ActionDataWord payload for MPI2_RAID_ACTION_START_RAID_FUNCTION;
 * RAIDFunction and Flags take the MPI2_RAID_ACTION_START_ values below. */
62typedef struct _MPI2_RAID_ACTION_START_RAID_FUNCTION
63{
64    U8                          RAIDFunction;               /* 0x00 */
65    U8                          Flags;                      /* 0x01 */
66    U16                         Reserved1;                  /* 0x02 */
67} MPI2_RAID_ACTION_START_RAID_FUNCTION,
68    MPI2_POINTER PTR_MPI2_RAID_ACTION_START_RAID_FUNCTION,
69    Mpi2RaidActionStartRaidFunction_t,
70    MPI2_POINTER pMpi2RaidActionStartRaidFunction_t;
71
72/* defines for the RAIDFunction field */
73#define MPI2_RAID_ACTION_START_BACKGROUND_INIT (0x00)
74#define MPI2_RAID_ACTION_START_ONLINE_CAP_EXPANSION (0x01)
75#define MPI2_RAID_ACTION_START_CONSISTENCY_CHECK (0x02)
76
77/* defines for the Flags field */
78#define MPI2_RAID_ACTION_START_NEW (0x00)
79#define MPI2_RAID_ACTION_START_RESUME (0x01)
80
81/* ActionDataWord for MPI2_RAID_ACTION_STOP_RAID_FUNCTION Action */
/* Inline ActionDataWord payload for MPI2_RAID_ACTION_STOP_RAID_FUNCTION;
 * RAIDFunction and Flags take the MPI2_RAID_ACTION_STOP_ values below. */
82typedef struct _MPI2_RAID_ACTION_STOP_RAID_FUNCTION
83{
84    U8                          RAIDFunction;               /* 0x00 */
85    U8                          Flags;                      /* 0x01 */
86    U16                         Reserved1;                  /* 0x02 */
87} MPI2_RAID_ACTION_STOP_RAID_FUNCTION,
88    MPI2_POINTER PTR_MPI2_RAID_ACTION_STOP_RAID_FUNCTION,
89    Mpi2RaidActionStopRaidFunction_t,
90    MPI2_POINTER pMpi2RaidActionStopRaidFunction_t;
91
92/* defines for the RAIDFunction field */
93#define MPI2_RAID_ACTION_STOP_BACKGROUND_INIT (0x00)
94#define MPI2_RAID_ACTION_STOP_ONLINE_CAP_EXPANSION (0x01)
95#define MPI2_RAID_ACTION_STOP_CONSISTENCY_CHECK (0x02)
96
97/* defines for the Flags field */
98#define MPI2_RAID_ACTION_STOP_ABORT (0x00)
99#define MPI2_RAID_ACTION_STOP_PAUSE (0x01)
100
101/* ActionDataWord for MPI2_RAID_ACTION_CREATE_HOT_SPARE Action */
/* Inline ActionDataWord payload for MPI2_RAID_ACTION_CREATE_HOT_SPARE:
 * DevHandle identifies the physical disk, HotSparePool the pool bitmap
 * (semantics of the pool value are firmware-defined — see MPI spec). */
102typedef struct _MPI2_RAID_ACTION_HOT_SPARE
103{
104    U8                          HotSparePool;               /* 0x00 */
105    U8                          Reserved1;                  /* 0x01 */
106    U16                         DevHandle;                  /* 0x02 */
107} MPI2_RAID_ACTION_HOT_SPARE, MPI2_POINTER PTR_MPI2_RAID_ACTION_HOT_SPARE,
108    Mpi2RaidActionHotSpare_t, MPI2_POINTER pMpi2RaidActionHotSpare_t;
109
110/* ActionDataWord for MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE Action */
/* Inline ActionDataWord payload for MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE;
 * Flags uses the MPI2_RAID_ACTION_ADATA_*_FW_UPDATE values defined below. */
111typedef struct _MPI2_RAID_ACTION_FW_UPDATE_MODE
112{
113    U8                          Flags;                      /* 0x00 */
114    U8                          DeviceFirmwareUpdateModeTimeout; /* 0x01 */
115    U16                         Reserved1;                  /* 0x02 */
116} MPI2_RAID_ACTION_FW_UPDATE_MODE,
117    MPI2_POINTER PTR_MPI2_RAID_ACTION_FW_UPDATE_MODE,
118    Mpi2RaidActionFwUpdateMode_t, MPI2_POINTER pMpi2RaidActionFwUpdateMode_t;
119
120/* ActionDataWord defines for use with MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */
121#define MPI2_RAID_ACTION_ADATA_DISABLE_FW_UPDATE (0x00)
122#define MPI2_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x01)
123
/* 32-bit ActionDataWord carried in the RAID Action request; the union
 * overlays the per-action payload structs above, and Word gives raw
 * access to the same 4 bytes. */
124typedef union _MPI2_RAID_ACTION_DATA
125{
126    U32                                 Word;
127    MPI2_RAID_ACTION_RATE_DATA          Rates;
128    MPI2_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction;
129    MPI2_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction;
130    MPI2_RAID_ACTION_HOT_SPARE          HotSpare;
131    MPI2_RAID_ACTION_FW_UPDATE_MODE     FwUpdateMode;
132} MPI2_RAID_ACTION_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_DATA,
133    Mpi2RaidActionData_t, MPI2_POINTER pMpi2RaidActionData_t;
134
135
136/* RAID Action Request Message */
/* RAID Action request message. Action selects one of the MPI2_RAID_ACTION_
 * operation codes defined below; small operands travel inline in
 * ActionDataWord, larger ones via the ActionDataSGE scatter/gather entry. */
137typedef struct _MPI2_RAID_ACTION_REQUEST
138{
139    U8                      Action;                         /* 0x00 */
140    U8                      Reserved1;                      /* 0x01 */
141    U8                      ChainOffset;                    /* 0x02 */
142    U8                      Function;                       /* 0x03 */
143    U16                     VolDevHandle;                   /* 0x04 */
144    U8                      PhysDiskNum;                    /* 0x06 */
145    U8                      MsgFlags;                       /* 0x07 */
146    U8                      VP_ID;                          /* 0x08 */
147    U8                      VF_ID;                          /* 0x09 */
148    U16                     Reserved2;                      /* 0x0A */
149    U32                     Reserved3;                      /* 0x0C */
150    MPI2_RAID_ACTION_DATA           ActionDataWord;         /* 0x10 */
151    MPI2_SGE_SIMPLE_UNION           ActionDataSGE;          /* 0x14 */
152} MPI2_RAID_ACTION_REQUEST, MPI2_POINTER PTR_MPI2_RAID_ACTION_REQUEST,
153    Mpi2RaidActionRequest_t, MPI2_POINTER pMpi2RaidActionRequest_t;
154
155/* RAID Action request Action values */
156
157#define MPI2_RAID_ACTION_INDICATOR_STRUCT (0x01)
158#define MPI2_RAID_ACTION_CREATE_VOLUME (0x02)
159#define MPI2_RAID_ACTION_DELETE_VOLUME (0x03)
160#define MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES (0x04)
161#define MPI2_RAID_ACTION_ENABLE_ALL_VOLUMES (0x05)
162#define MPI2_RAID_ACTION_PHYSDISK_OFFLINE (0x0A)
163#define MPI2_RAID_ACTION_PHYSDISK_ONLINE (0x0B)
164#define MPI2_RAID_ACTION_FAIL_PHYSDISK (0x0F)
165#define MPI2_RAID_ACTION_ACTIVATE_VOLUME (0x11)
166#define MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15)
167#define MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE (0x17)
168#define MPI2_RAID_ACTION_SET_VOLUME_NAME (0x18)
169#define MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE (0x19)
170#define MPI2_RAID_ACTION_ENABLE_FAILED_VOLUME (0x1C)
171#define MPI2_RAID_ACTION_CREATE_HOT_SPARE (0x1D)
172#define MPI2_RAID_ACTION_DELETE_HOT_SPARE (0x1E)
173#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20)
174#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21)
175#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22)
176
177
178/* RAID Volume Creation Structure */
179
180/*
181 * The following define can be customized for the targeted product.
182 */
183#ifndef MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS
184#define MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS (1)
185#endif
186
/* One physical-disk entry of the volume-creation PhysDisk array;
 * PhysDiskMap uses the MPI2_RAIDACTION_PHYSDISK_ defines below. */
187typedef struct _MPI2_RAID_VOLUME_PHYSDISK
188{
189    U8                      RAIDSetNum;                     /* 0x00 */
190    U8                      PhysDiskMap;                    /* 0x01 */
191    U16                     PhysDiskDevHandle;              /* 0x02 */
192} MPI2_RAID_VOLUME_PHYSDISK, MPI2_POINTER PTR_MPI2_RAID_VOLUME_PHYSDISK,
193    Mpi2RaidVolumePhysDisk_t, MPI2_POINTER pMpi2RaidVolumePhysDisk_t;
194
195/* defines for the PhysDiskMap field */
196#define MPI2_RAIDACTION_PHYSDISK_PRIMARY (0x01)
197#define MPI2_RAIDACTION_PHYSDISK_SECONDARY (0x02)
198
/* Payload (passed via ActionDataSGE) for MPI2_RAID_ACTION_CREATE_VOLUME.
 * The trailing PhysDisk array is sized at build time through
 * MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS (overridable, see #ifndef above);
 * at runtime NumPhysDisks gives the actual entry count. */
199typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT
200{
201    U8                      NumPhysDisks;                   /* 0x00 */
202    U8                      VolumeType;                     /* 0x01 */
203    U16                     Reserved1;                      /* 0x02 */
204    U32                     VolumeCreationFlags;            /* 0x04 */
205    U32                     VolumeSettings;                 /* 0x08 */
206    U8                      Reserved2;                      /* 0x0C */
207    U8                      ResyncRate;                     /* 0x0D */
208    U16                     DataScrubDuration;              /* 0x0E */
209    U64                     VolumeMaxLBA;                   /* 0x10 */
210    U32                     StripeSize;                     /* 0x18 */
211    U8                      Name[16];                       /* 0x1C */
212    MPI2_RAID_VOLUME_PHYSDISK PhysDisk[MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS];/* 0x2C */
213} MPI2_RAID_VOLUME_CREATION_STRUCT,
214    MPI2_POINTER PTR_MPI2_RAID_VOLUME_CREATION_STRUCT,
215    Mpi2RaidVolumeCreationStruct_t, MPI2_POINTER pMpi2RaidVolumeCreationStruct_t;
216
217/* use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */
218
219/* defines for the VolumeCreationFlags field */
220#define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80)
221#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x04)
222#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x02)
223#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x01)
224
225
226/* RAID Online Capacity Expansion Structure */
227
/* Payload for online capacity expansion: names two device handles plus a
 * Flags word (flag bit meanings are not defined in this header — see the
 * MPI 2.0 specification). */
228typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION
229{
230    U32                     Flags;                          /* 0x00 */
231    U16                     DevHandle0;                     /* 0x04 */
232    U16                     Reserved1;                      /* 0x06 */
233    U16                     DevHandle1;                     /* 0x08 */
234    U16                     Reserved2;                      /* 0x0A */
235} MPI2_RAID_ONLINE_CAPACITY_EXPANSION,
236    MPI2_POINTER PTR_MPI2_RAID_ONLINE_CAPACITY_EXPANSION,
237    Mpi2RaidOnlineCapacityExpansion_t,
238    MPI2_POINTER pMpi2RaidOnlineCapacityExpansion_t;
239
240
241/* RAID Volume Indicator Structure */
242
/* Progress indicator returned by MPI2_RAID_ACTION_INDICATOR_STRUCT:
 * BlocksRemaining out of TotalBlocks for the operation identified by
 * Flags (MPI2_RAID_VOL_FLAGS_OP_ values below). 20 bytes on the wire. */
243typedef struct _MPI2_RAID_VOL_INDICATOR
244{
245    U64                     TotalBlocks;                    /* 0x00 */
246    U64                     BlocksRemaining;                /* 0x08 */
247    U32                     Flags;                          /* 0x10 */
248} MPI2_RAID_VOL_INDICATOR, MPI2_POINTER PTR_MPI2_RAID_VOL_INDICATOR,
249    Mpi2RaidVolIndicator_t, MPI2_POINTER pMpi2RaidVolIndicator_t;
250
251/* defines for RAID Volume Indicator Flags field */
252#define MPI2_RAID_VOL_FLAGS_OP_MASK (0x0000000F)
253#define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT (0x00000000)
254#define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001)
255#define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002)
256#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003)
257
258
259/* RAID Action Reply ActionData union */
/* 20-byte (Word[5]) action-specific data returned in the RAID Action
 * reply; which member is valid depends on the Action that was issued. */
260typedef union _MPI2_RAID_ACTION_REPLY_DATA
261{
262    U32                     Word[5];
263    MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator;
264    U16                     VolDevHandle;
265    U8                      VolumeState;
266    U8                      PhysDiskNum;
267} MPI2_RAID_ACTION_REPLY_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY_DATA,
268    Mpi2RaidActionReplyData_t, MPI2_POINTER pMpi2RaidActionReplyData_t;
269
270/* use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */
271
272
273/* RAID Action Reply Message */
/* RAID Action reply message: echoes Action/VolDevHandle/PhysDiskNum from
 * the request, reports IOCStatus/IOCLogInfo, and carries the
 * action-specific ActionData union. */
274typedef struct _MPI2_RAID_ACTION_REPLY
275{
276    U8                      Action;                         /* 0x00 */
277    U8                      Reserved1;                      /* 0x01 */
278    U8                      MsgLength;                      /* 0x02 */
279    U8                      Function;                       /* 0x03 */
280    U16                     VolDevHandle;                   /* 0x04 */
281    U8                      PhysDiskNum;                    /* 0x06 */
282    U8                      MsgFlags;                       /* 0x07 */
283    U8                      VP_ID;                          /* 0x08 */
284    U8                      VF_ID;                          /* 0x09 */
285    U16                     Reserved2;                      /* 0x0A */
286    U16                     Reserved3;                      /* 0x0C */
287    U16                     IOCStatus;                      /* 0x0E */
288    U32                     IOCLogInfo;                     /* 0x10 */
289    MPI2_RAID_ACTION_REPLY_DATA     ActionData;             /* 0x14 */
290} MPI2_RAID_ACTION_REPLY, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY,
291    Mpi2RaidActionReply_t, MPI2_POINTER pMpi2RaidActionReply_t;
292
293
294#endif
295
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
new file mode 100644
index 000000000000..8a42b136cf53
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -0,0 +1,282 @@
1/*
2 * Copyright (c) 2000-2007 LSI Corporation.
3 *
4 *
5 * Name: mpi2_sas.h
6 * Title: MPI Serial Attached SCSI structures and definitions
7 * Creation Date: February 9, 2007
8 *
9 * mpi2_sas.h Version: 02.00.02
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
17 * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit
18 * Control Request.
19 * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
20 * Request.
21 * --------------------------------------------------------------------------
22 */
23
24#ifndef MPI2_SAS_H
25#define MPI2_SAS_H
26
27/*
28 * Values for SASStatus.
29 */
30#define MPI2_SASSTATUS_SUCCESS (0x00)
31#define MPI2_SASSTATUS_UNKNOWN_ERROR (0x01)
32#define MPI2_SASSTATUS_INVALID_FRAME (0x02)
33#define MPI2_SASSTATUS_UTC_BAD_DEST (0x03)
34#define MPI2_SASSTATUS_UTC_BREAK_RECEIVED (0x04)
35#define MPI2_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED (0x05)
36#define MPI2_SASSTATUS_UTC_PORT_LAYER_REQUEST (0x06)
37#define MPI2_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED (0x07)
38#define MPI2_SASSTATUS_UTC_STP_RESOURCES_BUSY (0x08)
39#define MPI2_SASSTATUS_UTC_WRONG_DESTINATION (0x09)
40#define MPI2_SASSTATUS_SHORT_INFORMATION_UNIT (0x0A)
41#define MPI2_SASSTATUS_LONG_INFORMATION_UNIT (0x0B)
42#define MPI2_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA (0x0C)
43#define MPI2_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR (0x0D)
44#define MPI2_SASSTATUS_XFER_RDY_NOT_EXPECTED (0x0E)
45#define MPI2_SASSTATUS_DATA_INCORRECT_DATA_LENGTH (0x0F)
46#define MPI2_SASSTATUS_DATA_TOO_MUCH_READ_DATA (0x10)
47#define MPI2_SASSTATUS_DATA_OFFSET_ERROR (0x11)
48#define MPI2_SASSTATUS_SDSF_NAK_RECEIVED (0x12)
49#define MPI2_SASSTATUS_SDSF_CONNECTION_FAILED (0x13)
50#define MPI2_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14)
51
52
53/*
54 * Values for the SAS DeviceInfo field used in SAS Device Status Change Event
55 * data and SAS Configuration pages.
56 */
57#define MPI2_SAS_DEVICE_INFO_SEP (0x00004000)
58#define MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000)
59#define MPI2_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000)
60#define MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH (0x00000800)
61#define MPI2_SAS_DEVICE_INFO_SSP_TARGET (0x00000400)
62#define MPI2_SAS_DEVICE_INFO_STP_TARGET (0x00000200)
63#define MPI2_SAS_DEVICE_INFO_SMP_TARGET (0x00000100)
64#define MPI2_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080)
65#define MPI2_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040)
66#define MPI2_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020)
67#define MPI2_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010)
68#define MPI2_SAS_DEVICE_INFO_SATA_HOST (0x00000008)
69
70#define MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007)
71#define MPI2_SAS_DEVICE_INFO_NO_DEVICE (0x00000000)
72#define MPI2_SAS_DEVICE_INFO_END_DEVICE (0x00000001)
73#define MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002)
74#define MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003)
75
76
77/*****************************************************************************
78*
79* SAS Messages
80*
81*****************************************************************************/
82
83/****************************************************************************
84* SMP Passthrough messages
85****************************************************************************/
86
87/* SMP Passthrough Request Message */
/* SMP Passthrough request: sends an SMP frame of RequestDataLength bytes
 * (described by SGL) to the expander at SASAddress via PhysicalPort. */
88typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST
89{
90    U8                      PassthroughFlags;               /* 0x00 */
91    U8                      PhysicalPort;                   /* 0x01 */
92    U8                      ChainOffset;                    /* 0x02 */
93    U8                      Function;                       /* 0x03 */
94    U16                     RequestDataLength;              /* 0x04 */
95    U8                      SGLFlags;                       /* 0x06 */
96    U8                      MsgFlags;                       /* 0x07 */
97    U8                      VP_ID;                          /* 0x08 */
98    U8                      VF_ID;                          /* 0x09 */
99    U16                     Reserved1;                      /* 0x0A */
100    U32                     Reserved2;                      /* 0x0C */
101    U64                     SASAddress;                     /* 0x10 */
102    U32                     Reserved3;                      /* 0x18 */
103    U32                     Reserved4;                      /* 0x1C */
104    MPI2_SIMPLE_SGE_UNION   SGL;                            /* 0x20 */
105} MPI2_SMP_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SMP_PASSTHROUGH_REQUEST,
106    Mpi2SmpPassthroughRequest_t, MPI2_POINTER pMpi2SmpPassthroughRequest_t;
107
108/* values for PassthroughFlags field */
109#define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80)
110
111/* values for SGLFlags field are in the SGL section of mpi2.h */
112
113
114/* SMP Passthrough Reply Message */
/* SMP Passthrough reply: SASStatus uses the MPI2_SASSTATUS_ values at the
 * top of this file; the first 4 bytes of the SMP response are returned
 * inline in ResponseData (ResponseDataLength gives the full length). */
115typedef struct _MPI2_SMP_PASSTHROUGH_REPLY
116{
117    U8                      PassthroughFlags;               /* 0x00 */
118    U8                      PhysicalPort;                   /* 0x01 */
119    U8                      MsgLength;                      /* 0x02 */
120    U8                      Function;                       /* 0x03 */
121    U16                     ResponseDataLength;             /* 0x04 */
122    U8                      SGLFlags;                       /* 0x06 */
123    U8                      MsgFlags;                       /* 0x07 */
124    U8                      VP_ID;                          /* 0x08 */
125    U8                      VF_ID;                          /* 0x09 */
126    U16                     Reserved1;                      /* 0x0A */
127    U8                      Reserved2;                      /* 0x0C */
128    U8                      SASStatus;                      /* 0x0D */
129    U16                     IOCStatus;                      /* 0x0E */
130    U32                     IOCLogInfo;                     /* 0x10 */
131    U32                     Reserved3;                      /* 0x14 */
132    U8                      ResponseData[4];                /* 0x18 */
133} MPI2_SMP_PASSTHROUGH_REPLY, MPI2_POINTER PTR_MPI2_SMP_PASSTHROUGH_REPLY,
134    Mpi2SmpPassthroughReply_t, MPI2_POINTER pMpi2SmpPassthroughReply_t;
135
136/* values for PassthroughFlags field */
137#define MPI2_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80)
138
139/* values for SASStatus field are at the top of this file */
140
141
142/****************************************************************************
143* SATA Passthrough messages
144****************************************************************************/
145
146/* SATA Passthrough Request Message */
/* SATA Passthrough request: sends the 20-byte CommandFIS to the SATA
 * device at DevHandle; data of DataLength bytes is described by SGL. */
147typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST
148{
149    U16                     DevHandle;                      /* 0x00 */
150    U8                      ChainOffset;                    /* 0x02 */
151    U8                      Function;                       /* 0x03 */
152    U16                     PassthroughFlags;               /* 0x04 */
153    U8                      SGLFlags;                       /* 0x06 */
154    U8                      MsgFlags;                       /* 0x07 */
155    U8                      VP_ID;                          /* 0x08 */
156    U8                      VF_ID;                          /* 0x09 */
157    U16                     Reserved1;                      /* 0x0A */
158    U32                     Reserved2;                      /* 0x0C */
159    U32                     Reserved3;                      /* 0x10 */
160    U32                     Reserved4;                      /* 0x14 */
161    U32                     DataLength;                     /* 0x18 */
162    U8                      CommandFIS[20];                 /* 0x1C */
163    MPI2_SIMPLE_SGE_UNION   SGL;                            /* 0x30 */
164} MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
165    Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t;
/* NOTE(review): the SGL offset annotation previously read 0x20, which is
 * inconsistent with CommandFIS[20] starting at 0x1C (0x1C + 0x14 = 0x30).
 * Corrected to 0x30; the field layout itself is unchanged. */
166
167/* values for PassthroughFlags field */
168#define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100)
169#define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020)
170#define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010)
171#define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004)
172#define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002)
173#define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001)
174
175/* values for SGLFlags field are in the SGL section of mpi2.h */
176
177
178/* SATA Passthrough Reply Message */
/* SATA Passthrough reply: returns the device's 20-byte StatusFIS plus the
 * actual TransferCount; SASStatus uses the MPI2_SASSTATUS_ values at the
 * top of this file. */
179typedef struct _MPI2_SATA_PASSTHROUGH_REPLY
180{
181    U16                     DevHandle;                      /* 0x00 */
182    U8                      MsgLength;                      /* 0x02 */
183    U8                      Function;                       /* 0x03 */
184    U16                     PassthroughFlags;               /* 0x04 */
185    U8                      SGLFlags;                       /* 0x06 */
186    U8                      MsgFlags;                       /* 0x07 */
187    U8                      VP_ID;                          /* 0x08 */
188    U8                      VF_ID;                          /* 0x09 */
189    U16                     Reserved1;                      /* 0x0A */
190    U8                      Reserved2;                      /* 0x0C */
191    U8                      SASStatus;                      /* 0x0D */
192    U16                     IOCStatus;                      /* 0x0E */
193    U32                     IOCLogInfo;                     /* 0x10 */
194    U8                      StatusFIS[20];                  /* 0x14 */
195    U32                     StatusControlRegisters;         /* 0x28 */
196    U32                     TransferCount;                  /* 0x2C */
197} MPI2_SATA_PASSTHROUGH_REPLY, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REPLY,
198    Mpi2SataPassthroughReply_t, MPI2_POINTER pMpi2SataPassthroughReply_t;
199
200/* values for SASStatus field are at the top of this file */
201
202
203/****************************************************************************
204* SAS IO Unit Control messages
205****************************************************************************/
206
207/* SAS IO Unit Control Request Message */
/* SAS IO Unit Control request. Operation selects one of the MPI2_SAS_OP_
 * codes below; which of the remaining fields (DevHandle, PhyNum,
 * PrimFlags/Primitive, LookupMethod/SlotNumber/LookupAddress,
 * IOCParameter/IOCParameterValue) are meaningful depends on the chosen
 * Operation — unused fields are ignored. */
208typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST
209{
210    U8                      Operation;                      /* 0x00 */
211    U8                      Reserved1;                      /* 0x01 */
212    U8                      ChainOffset;                    /* 0x02 */
213    U8                      Function;                       /* 0x03 */
214    U16                     DevHandle;                      /* 0x04 */
215    U8                      IOCParameter;                   /* 0x06 */
216    U8                      MsgFlags;                       /* 0x07 */
217    U8                      VP_ID;                          /* 0x08 */
218    U8                      VF_ID;                          /* 0x09 */
219    U16                     Reserved3;                      /* 0x0A */
220    U16                     Reserved4;                      /* 0x0C */
221    U8                      PhyNum;                         /* 0x0E */
222    U8                      PrimFlags;                      /* 0x0F */
223    U32                     Primitive;                      /* 0x10 */
224    U8                      LookupMethod;                   /* 0x14 */
225    U8                      Reserved5;                      /* 0x15 */
226    U16                     SlotNumber;                     /* 0x16 */
227    U64                     LookupAddress;                  /* 0x18 */
228    U32                     IOCParameterValue;              /* 0x20 */
229    U32                     Reserved7;                      /* 0x24 */
230    U32                     Reserved8;                      /* 0x28 */
231} MPI2_SAS_IOUNIT_CONTROL_REQUEST,
232    MPI2_POINTER PTR_MPI2_SAS_IOUNIT_CONTROL_REQUEST,
233    Mpi2SasIoUnitControlRequest_t, MPI2_POINTER pMpi2SasIoUnitControlRequest_t;
234
235/* values for the Operation field */
236#define MPI2_SAS_OP_CLEAR_ALL_PERSISTENT (0x02)
237#define MPI2_SAS_OP_PHY_LINK_RESET (0x06)
238#define MPI2_SAS_OP_PHY_HARD_RESET (0x07)
239#define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08)
240#define MPI2_SAS_OP_SEND_PRIMITIVE (0x0A)
241#define MPI2_SAS_OP_FORCE_FULL_DISCOVERY (0x0B)
242#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C)
243#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D)
244#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E)
245#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F)
246#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80)
247
248/* values for the PrimFlags field */
249#define MPI2_SAS_PRIMFLAGS_SINGLE (0x08)
250#define MPI2_SAS_PRIMFLAGS_TRIPLE (0x02)
251#define MPI2_SAS_PRIMFLAGS_REDUNDANT (0x01)
252
253/* values for the LookupMethod field */
254#define MPI2_SAS_LOOKUP_METHOD_SAS_ADDRESS (0x01)
255#define MPI2_SAS_LOOKUP_METHOD_SAS_ENCLOSURE_SLOT (0x02)
256#define MPI2_SAS_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
257
258
259/* SAS IO Unit Control Reply Message */
/* SAS IO Unit Control reply: echoes Operation/DevHandle/IOCParameter from
 * the request and reports IOCStatus/IOCLogInfo. */
260typedef struct _MPI2_SAS_IOUNIT_CONTROL_REPLY
261{
262    U8                      Operation;                      /* 0x00 */
263    U8                      Reserved1;                      /* 0x01 */
264    U8                      MsgLength;                      /* 0x02 */
265    U8                      Function;                       /* 0x03 */
266    U16                     DevHandle;                      /* 0x04 */
267    U8                      IOCParameter;                   /* 0x06 */
268    U8                      MsgFlags;                       /* 0x07 */
269    U8                      VP_ID;                          /* 0x08 */
270    U8                      VF_ID;                          /* 0x09 */
271    U16                     Reserved3;                      /* 0x0A */
272    U16                     Reserved4;                      /* 0x0C */
273    U16                     IOCStatus;                      /* 0x0E */
274    U32                     IOCLogInfo;                     /* 0x10 */
275} MPI2_SAS_IOUNIT_CONTROL_REPLY,
276    MPI2_POINTER PTR_MPI2_SAS_IOUNIT_CONTROL_REPLY,
277    Mpi2SasIoUnitControlReply_t, MPI2_POINTER pMpi2SasIoUnitControlReply_t;
278
279
280#endif
281
282
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
new file mode 100644
index 000000000000..2ff4e936bd39
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -0,0 +1,249 @@
1/*
2 * Copyright (c) 2000-2008 LSI Corporation.
3 *
4 *
5 * Name: mpi2_tool.h
6 * Title: MPI diagnostic tool structures and definitions
7 * Creation Date: March 26, 2007
8 *
9 * mpi2_tool.h Version: 02.00.02
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
17 * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release
18 * structures and defines.
19 * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
20 * --------------------------------------------------------------------------
21 */
22
23#ifndef MPI2_TOOL_H
24#define MPI2_TOOL_H
25
26/*****************************************************************************
27*
28* Toolbox Messages
29*
30*****************************************************************************/
31
32/* defines for the Tools */
33#define MPI2_TOOLBOX_CLEAN_TOOL (0x00)
34#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01)
35#define MPI2_TOOLBOX_BEACON_TOOL (0x05)
36
37/****************************************************************************
38* Toolbox reply
39****************************************************************************/
40
/* Common Toolbox reply, shared by all tools: echoes the Tool code and
 * reports IOCStatus/IOCLogInfo. */
41typedef struct _MPI2_TOOLBOX_REPLY
42{
43    U8                      Tool;                           /* 0x00 */
44    U8                      Reserved1;                      /* 0x01 */
45    U8                      MsgLength;                      /* 0x02 */
46    U8                      Function;                       /* 0x03 */
47    U16                     Reserved2;                      /* 0x04 */
48    U8                      Reserved3;                      /* 0x06 */
49    U8                      MsgFlags;                       /* 0x07 */
50    U8                      VP_ID;                          /* 0x08 */
51    U8                      VF_ID;                          /* 0x09 */
52    U16                     Reserved4;                      /* 0x0A */
53    U16                     Reserved5;                      /* 0x0C */
54    U16                     IOCStatus;                      /* 0x0E */
55    U32                     IOCLogInfo;                     /* 0x10 */
56} MPI2_TOOLBOX_REPLY, MPI2_POINTER PTR_MPI2_TOOLBOX_REPLY,
57    Mpi2ToolboxReply_t, MPI2_POINTER pMpi2ToolboxReply_t;
58
59
60/****************************************************************************
61* Toolbox Clean Tool request
62****************************************************************************/
63
/* Clean Tool request (Tool = MPI2_TOOLBOX_CLEAN_TOOL): each bit in Flags
 * selects a persistent region to clear (MPI2_TOOLBOX_CLEAN_ bits below). */
64typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST
65{
66    U8                      Tool;                           /* 0x00 */
67    U8                      Reserved1;                      /* 0x01 */
68    U8                      ChainOffset;                    /* 0x02 */
69    U8                      Function;                       /* 0x03 */
70    U16                     Reserved2;                      /* 0x04 */
71    U8                      Reserved3;                      /* 0x06 */
72    U8                      MsgFlags;                       /* 0x07 */
73    U8                      VP_ID;                          /* 0x08 */
74    U8                      VF_ID;                          /* 0x09 */
75    U16                     Reserved4;                      /* 0x0A */
76    U32                     Flags;                          /* 0x0C */
77 } MPI2_TOOLBOX_CLEAN_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_CLEAN_REQUEST,
78    Mpi2ToolboxCleanRequest_t, MPI2_POINTER pMpi2ToolboxCleanRequest_t;
79
80/* values for the Flags field */
81#define MPI2_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000)
82#define MPI2_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000)
83#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000)
84#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000)
85#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000)
86#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000)
87#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000)
88#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004)
89#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002)
90#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001)
91
92
93/****************************************************************************
94* Toolbox Memory Move request
95****************************************************************************/
96
/* Memory Move Tool request (Tool = MPI2_TOOLBOX_MEMORY_MOVE_TOOL): the
 * only operation-specific field is the SGL describing the transfer. */
97typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST
98{
99    U8                      Tool;                           /* 0x00 */
100    U8                      Reserved1;                      /* 0x01 */
101    U8                      ChainOffset;                    /* 0x02 */
102    U8                      Function;                       /* 0x03 */
103    U16                     Reserved2;                      /* 0x04 */
104    U8                      Reserved3;                      /* 0x06 */
105    U8                      MsgFlags;                       /* 0x07 */
106    U8                      VP_ID;                          /* 0x08 */
107    U8                      VF_ID;                          /* 0x09 */
108    U16                     Reserved4;                      /* 0x0A */
109    MPI2_SGE_SIMPLE_UNION   SGL;                            /* 0x0C */
110} MPI2_TOOLBOX_MEM_MOVE_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_MEM_MOVE_REQUEST,
111    Mpi2ToolboxMemMoveRequest_t, MPI2_POINTER pMpi2ToolboxMemMoveRequest_t;
112
113
114/****************************************************************************
115* Toolbox Beacon Tool request
116****************************************************************************/
117
/* Beacon Tool request (Tool = MPI2_TOOLBOX_BEACON_TOOL): turns the beacon
 * on PhysicalPort on or off via Flags (MPI2_TOOLBOX_FLAGS_BEACONMODE_
 * values below). */
118typedef struct _MPI2_TOOLBOX_BEACON_REQUEST
119{
120    U8                      Tool;                           /* 0x00 */
121    U8                      Reserved1;                      /* 0x01 */
122    U8                      ChainOffset;                    /* 0x02 */
123    U8                      Function;                       /* 0x03 */
124    U16                     Reserved2;                      /* 0x04 */
125    U8                      Reserved3;                      /* 0x06 */
126    U8                      MsgFlags;                       /* 0x07 */
127    U8                      VP_ID;                          /* 0x08 */
128    U8                      VF_ID;                          /* 0x09 */
129    U16                     Reserved4;                      /* 0x0A */
130    U8                      Reserved5;                      /* 0x0C */
131    U8                      PhysicalPort;                   /* 0x0D */
132    U8                      Reserved6;                      /* 0x0E */
133    U8                      Flags;                          /* 0x0F */
134} MPI2_TOOLBOX_BEACON_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_BEACON_REQUEST,
135    Mpi2ToolboxBeaconRequest_t, MPI2_POINTER pMpi2ToolboxBeaconRequest_t;
136
137/* values for the Flags field */
138#define MPI2_TOOLBOX_FLAGS_BEACONMODE_OFF (0x00)
139#define MPI2_TOOLBOX_FLAGS_BEACONMODE_ON (0x01)
140
141
142/*****************************************************************************
143*
144* Diagnostic Buffer Messages
145*
146*****************************************************************************/
147
148
149/****************************************************************************
150* Diagnostic Buffer Post request
151****************************************************************************/
152
/* Diagnostic Buffer Post request: hands the IOC a host buffer
 * (BufferAddress/BufferLength) of the given BufferType
 * (MPI2_DIAG_BUF_TYPE_ values below); ProductSpecific content is
 * firmware-defined. */
153typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST
154{
155    U8                      Reserved1;                      /* 0x00 */
156    U8                      BufferType;                     /* 0x01 */
157    U8                      ChainOffset;                    /* 0x02 */
158    U8                      Function;                       /* 0x03 */
159    U16                     Reserved2;                      /* 0x04 */
160    U8                      Reserved3;                      /* 0x06 */
161    U8                      MsgFlags;                       /* 0x07 */
162    U8                      VP_ID;                          /* 0x08 */
163    U8                      VF_ID;                          /* 0x09 */
164    U16                     Reserved4;                      /* 0x0A */
165    U64                     BufferAddress;                  /* 0x0C */
166    U32                     BufferLength;                   /* 0x14 */
167    U32                     Reserved5;                      /* 0x18 */
168    U32                     Reserved6;                      /* 0x1C */
169    U32                     Flags;                          /* 0x20 */
170    U32                     ProductSpecific[23];            /* 0x24 */
171} MPI2_DIAG_BUFFER_POST_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REQUEST,
172    Mpi2DiagBufferPostRequest_t, MPI2_POINTER pMpi2DiagBufferPostRequest_t;
173
174/* values for the BufferType field */
175#define MPI2_DIAG_BUF_TYPE_TRACE (0x00)
176#define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01)
177/* count of the number of buffer types */
178#define MPI2_DIAG_BUF_TYPE_COUNT (0x02)
179
180
181/****************************************************************************
182* Diagnostic Buffer Post reply
183****************************************************************************/
184
/* Diagnostic Buffer Post reply: reports IOCStatus/IOCLogInfo and the
 * TransferLength for the posted buffer of the echoed BufferType. */
185typedef struct _MPI2_DIAG_BUFFER_POST_REPLY
186{
187    U8                      Reserved1;                      /* 0x00 */
188    U8                      BufferType;                     /* 0x01 */
189    U8                      MsgLength;                      /* 0x02 */
190    U8                      Function;                       /* 0x03 */
191    U16                     Reserved2;                      /* 0x04 */
192    U8                      Reserved3;                      /* 0x06 */
193    U8                      MsgFlags;                       /* 0x07 */
194    U8                      VP_ID;                          /* 0x08 */
195    U8                      VF_ID;                          /* 0x09 */
196    U16                     Reserved4;                      /* 0x0A */
197    U16                     Reserved5;                      /* 0x0C */
198    U16                     IOCStatus;                      /* 0x0E */
199    U32                     IOCLogInfo;                     /* 0x10 */
200    U32                     TransferLength;                 /* 0x14 */
201} MPI2_DIAG_BUFFER_POST_REPLY, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REPLY,
202    Mpi2DiagBufferPostReply_t, MPI2_POINTER pMpi2DiagBufferPostReply_t;
203
204
205/****************************************************************************
206* Diagnostic Release request
207****************************************************************************/
208
/* Diagnostic Release request: asks the IOC to release the previously
 * posted diagnostic buffer of the given BufferType. */
209typedef struct _MPI2_DIAG_RELEASE_REQUEST
210{
211    U8                      Reserved1;                      /* 0x00 */
212    U8                      BufferType;                     /* 0x01 */
213    U8                      ChainOffset;                    /* 0x02 */
214    U8                      Function;                       /* 0x03 */
215    U16                     Reserved2;                      /* 0x04 */
216    U8                      Reserved3;                      /* 0x06 */
217    U8                      MsgFlags;                       /* 0x07 */
218    U8                      VP_ID;                          /* 0x08 */
219    U8                      VF_ID;                          /* 0x09 */
220    U16                     Reserved4;                      /* 0x0A */
221} MPI2_DIAG_RELEASE_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_RELEASE_REQUEST,
222    Mpi2DiagReleaseRequest_t, MPI2_POINTER pMpi2DiagReleaseRequest_t;
223
224
225/****************************************************************************
226* Diagnostic Release reply
227****************************************************************************/
228
/* Diagnostic Release reply: reports IOCStatus/IOCLogInfo for the release
 * of the buffer identified by the echoed BufferType. */
229typedef struct _MPI2_DIAG_RELEASE_REPLY
230{
231    U8                      Reserved1;                      /* 0x00 */
232    U8                      BufferType;                     /* 0x01 */
233    U8                      MsgLength;                      /* 0x02 */
234    U8                      Function;                       /* 0x03 */
235    U16                     Reserved2;                      /* 0x04 */
236    U8                      Reserved3;                      /* 0x06 */
237    U8                      MsgFlags;                       /* 0x07 */
238    U8                      VP_ID;                          /* 0x08 */
239    U8                      VF_ID;                          /* 0x09 */
240    U16                     Reserved4;                      /* 0x0A */
241    U16                     Reserved5;                      /* 0x0C */
242    U16                     IOCStatus;                      /* 0x0E */
243    U32                     IOCLogInfo;                     /* 0x10 */
244} MPI2_DIAG_RELEASE_REPLY, MPI2_POINTER PTR_MPI2_DIAG_RELEASE_REPLY,
245    Mpi2DiagReleaseReply_t, MPI2_POINTER pMpi2DiagReleaseReply_t;
246
247
248#endif
249
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_type.h b/drivers/scsi/mpt2sas/mpi/mpi2_type.h
new file mode 100644
index 000000000000..cfde017bf16e
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_type.h
@@ -0,0 +1,61 @@
1/*
2 * Copyright (c) 2000-2007 LSI Corporation.
3 *
4 *
5 * Name: mpi2_type.h
6 * Title: MPI basic type definitions
7 * Creation Date: August 16, 2006
8 *
9 * mpi2_type.h Version: 02.00.00
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
17 * --------------------------------------------------------------------------
18 */
19
20#ifndef MPI2_TYPE_H
21#define MPI2_TYPE_H
22
23
24/*******************************************************************************
25 * Define MPI2_POINTER if it hasn't already been defined. By default
26 * MPI2_POINTER is defined to be a near pointer. MPI2_POINTER can be defined as
27 * a far pointer by defining MPI2_POINTER as "far *" before this header file is
28 * included.
29 */
30#ifndef MPI2_POINTER
31#define MPI2_POINTER *
32#endif
33
34/* the basic types may have already been included by mpi_type.h */
35#ifndef MPI_TYPE_H
36/*****************************************************************************
37*
38* Basic Types
39*
40*****************************************************************************/
41
/* Linux mapping of the MPI basic types: U16/U32/U64 are little-endian on
 * the wire (__le* kernel types); U64 is limited to 4-byte alignment so
 * struct layouts match the MPI message format exactly. */
42typedef u8 U8;
43typedef __le16 U16;
44typedef __le32 U32;
45typedef __le64 U64 __attribute__((aligned(4)));
46
47/*****************************************************************************
48*
49*               Pointer Types
50*
51*****************************************************************************/
52
/* Convenience pointer aliases for the basic types above. */
53typedef U8      *PU8;
54typedef U16     *PU16;
55typedef U32     *PU32;
56typedef U64     *PU64;
57
58#endif
59
60#endif
61
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
new file mode 100644
index 000000000000..52427a8324f5
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -0,0 +1,3435 @@
1/*
2 * This is the Fusion MPT base driver providing common API layer interface
3 * for access to MPT (Message Passing Technology) firmware.
4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
6 * Copyright (C) 2007-2008 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
42 * USA.
43 */
44
45#include <linux/version.h>
46#include <linux/kernel.h>
47#include <linux/module.h>
48#include <linux/errno.h>
49#include <linux/init.h>
50#include <linux/slab.h>
51#include <linux/types.h>
52#include <linux/pci.h>
53#include <linux/kdev_t.h>
54#include <linux/blkdev.h>
55#include <linux/delay.h>
56#include <linux/interrupt.h>
57#include <linux/dma-mapping.h>
58#include <linux/sort.h>
59#include <linux/io.h>
60
61#include "mpt2sas_base.h"
62
63static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
64
65#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
66#define MPT2SAS_MAX_REQUEST_QUEUE 500 /* maximum controller queue depth */
67
68static int max_queue_depth = -1;
69module_param(max_queue_depth, int, 0);
70MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
71
72static int max_sgl_entries = -1;
73module_param(max_sgl_entries, int, 0);
74MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
75
76static int msix_disable = -1;
77module_param(msix_disable, int, 0);
78MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
79
80/**
81 * _base_fault_reset_work - workq handling ioc fault conditions
82 * @work: input argument, used to derive ioc
83 * Context: sleep.
84 *
85 * Return nothing.
86 */
87static void
88_base_fault_reset_work(struct work_struct *work)
89{
90 struct MPT2SAS_ADAPTER *ioc =
91 container_of(work, struct MPT2SAS_ADAPTER, fault_reset_work.work);
92 unsigned long flags;
93 u32 doorbell;
94 int rc;
95
96 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
97 if (ioc->ioc_reset_in_progress)
98 goto rearm_timer;
99 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
100
101 doorbell = mpt2sas_base_get_iocstate(ioc, 0);
102 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
103 rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
104 FORCE_BIG_HAMMER);
105 printk(MPT2SAS_WARN_FMT "%s: hard reset: %s\n", ioc->name,
106 __func__, (rc == 0) ? "success" : "failed");
107 doorbell = mpt2sas_base_get_iocstate(ioc, 0);
108 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
109 mpt2sas_base_fault_info(ioc, doorbell &
110 MPI2_DOORBELL_DATA_MASK);
111 }
112
113 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
114 rearm_timer:
115 if (ioc->fault_reset_work_q)
116 queue_delayed_work(ioc->fault_reset_work_q,
117 &ioc->fault_reset_work,
118 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
119 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
120}
121
122#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
123/**
124 * _base_sas_ioc_info - verbose translation of the ioc status
125 * @ioc: pointer to scsi command object
126 * @mpi_reply: reply mf payload returned from firmware
127 * @request_hdr: request mf
128 *
129 * Return nothing.
130 */
131static void
132_base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
133 MPI2RequestHeader_t *request_hdr)
134{
135 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
136 MPI2_IOCSTATUS_MASK;
137 char *desc = NULL;
138 u16 frame_sz;
139 char *func_str = NULL;
140
141 /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
142 if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
143 request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
144 request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
145 return;
146
147 switch (ioc_status) {
148
149/****************************************************************************
150* Common IOCStatus values for all replies
151****************************************************************************/
152
153 case MPI2_IOCSTATUS_INVALID_FUNCTION:
154 desc = "invalid function";
155 break;
156 case MPI2_IOCSTATUS_BUSY:
157 desc = "busy";
158 break;
159 case MPI2_IOCSTATUS_INVALID_SGL:
160 desc = "invalid sgl";
161 break;
162 case MPI2_IOCSTATUS_INTERNAL_ERROR:
163 desc = "internal error";
164 break;
165 case MPI2_IOCSTATUS_INVALID_VPID:
166 desc = "invalid vpid";
167 break;
168 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
169 desc = "insufficient resources";
170 break;
171 case MPI2_IOCSTATUS_INVALID_FIELD:
172 desc = "invalid field";
173 break;
174 case MPI2_IOCSTATUS_INVALID_STATE:
175 desc = "invalid state";
176 break;
177 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
178 desc = "op state not supported";
179 break;
180
181/****************************************************************************
182* Config IOCStatus values
183****************************************************************************/
184
185 case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
186 desc = "config invalid action";
187 break;
188 case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
189 desc = "config invalid type";
190 break;
191 case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
192 desc = "config invalid page";
193 break;
194 case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
195 desc = "config invalid data";
196 break;
197 case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
198 desc = "config no defaults";
199 break;
200 case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
201 desc = "config cant commit";
202 break;
203
204/****************************************************************************
205* SCSI IO Reply
206****************************************************************************/
207
208 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
209 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
210 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
211 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
212 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
213 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
214 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
215 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
216 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
217 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
218 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
219 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
220 break;
221
222/****************************************************************************
223* For use by SCSI Initiator and SCSI Target end-to-end data protection
224****************************************************************************/
225
226 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
227 desc = "eedp guard error";
228 break;
229 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
230 desc = "eedp ref tag error";
231 break;
232 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
233 desc = "eedp app tag error";
234 break;
235
236/****************************************************************************
237* SCSI Target values
238****************************************************************************/
239
240 case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
241 desc = "target invalid io index";
242 break;
243 case MPI2_IOCSTATUS_TARGET_ABORTED:
244 desc = "target aborted";
245 break;
246 case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
247 desc = "target no conn retryable";
248 break;
249 case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
250 desc = "target no connection";
251 break;
252 case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
253 desc = "target xfer count mismatch";
254 break;
255 case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
256 desc = "target data offset error";
257 break;
258 case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
259 desc = "target too much write data";
260 break;
261 case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
262 desc = "target iu too short";
263 break;
264 case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
265 desc = "target ack nak timeout";
266 break;
267 case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
268 desc = "target nak received";
269 break;
270
271/****************************************************************************
272* Serial Attached SCSI values
273****************************************************************************/
274
275 case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
276 desc = "smp request failed";
277 break;
278 case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
279 desc = "smp data overrun";
280 break;
281
282/****************************************************************************
283* Diagnostic Buffer Post / Diagnostic Release values
284****************************************************************************/
285
286 case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
287 desc = "diagnostic released";
288 break;
289 default:
290 break;
291 }
292
293 if (!desc)
294 return;
295
296 switch (request_hdr->Function) {
297 case MPI2_FUNCTION_CONFIG:
298 frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
299 func_str = "config_page";
300 break;
301 case MPI2_FUNCTION_SCSI_TASK_MGMT:
302 frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
303 func_str = "task_mgmt";
304 break;
305 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
306 frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
307 func_str = "sas_iounit_ctl";
308 break;
309 case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
310 frame_sz = sizeof(Mpi2SepRequest_t);
311 func_str = "enclosure";
312 break;
313 case MPI2_FUNCTION_IOC_INIT:
314 frame_sz = sizeof(Mpi2IOCInitRequest_t);
315 func_str = "ioc_init";
316 break;
317 case MPI2_FUNCTION_PORT_ENABLE:
318 frame_sz = sizeof(Mpi2PortEnableRequest_t);
319 func_str = "port_enable";
320 break;
321 case MPI2_FUNCTION_SMP_PASSTHROUGH:
322 frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
323 func_str = "smp_passthru";
324 break;
325 default:
326 frame_sz = 32;
327 func_str = "unknown";
328 break;
329 }
330
331 printk(MPT2SAS_WARN_FMT "ioc_status: %s(0x%04x), request(0x%p),"
332 " (%s)\n", ioc->name, desc, ioc_status, request_hdr, func_str);
333
334 _debug_dump_mf(request_hdr, frame_sz/4);
335}
336
337/**
338 * _base_display_event_data - verbose translation of firmware asyn events
339 * @ioc: pointer to scsi command object
340 * @mpi_reply: reply mf payload returned from firmware
341 *
342 * Return nothing.
343 */
344static void
345_base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
346 Mpi2EventNotificationReply_t *mpi_reply)
347{
348 char *desc = NULL;
349 u16 event;
350
351 if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
352 return;
353
354 event = le16_to_cpu(mpi_reply->Event);
355
356 switch (event) {
357 case MPI2_EVENT_LOG_DATA:
358 desc = "Log Data";
359 break;
360 case MPI2_EVENT_STATE_CHANGE:
361 desc = "Status Change";
362 break;
363 case MPI2_EVENT_HARD_RESET_RECEIVED:
364 desc = "Hard Reset Received";
365 break;
366 case MPI2_EVENT_EVENT_CHANGE:
367 desc = "Event Change";
368 break;
369 case MPI2_EVENT_TASK_SET_FULL:
370 desc = "Task Set Full";
371 break;
372 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
373 desc = "Device Status Change";
374 break;
375 case MPI2_EVENT_IR_OPERATION_STATUS:
376 desc = "IR Operation Status";
377 break;
378 case MPI2_EVENT_SAS_DISCOVERY:
379 desc = "Discovery";
380 break;
381 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
382 desc = "SAS Broadcast Primitive";
383 break;
384 case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
385 desc = "SAS Init Device Status Change";
386 break;
387 case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
388 desc = "SAS Init Table Overflow";
389 break;
390 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
391 desc = "SAS Topology Change List";
392 break;
393 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
394 desc = "SAS Enclosure Device Status Change";
395 break;
396 case MPI2_EVENT_IR_VOLUME:
397 desc = "IR Volume";
398 break;
399 case MPI2_EVENT_IR_PHYSICAL_DISK:
400 desc = "IR Physical Disk";
401 break;
402 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
403 desc = "IR Configuration Change List";
404 break;
405 case MPI2_EVENT_LOG_ENTRY_ADDED:
406 desc = "Log Entry Added";
407 break;
408 }
409
410 if (!desc)
411 return;
412
413 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, desc);
414}
415#endif
416
417/**
418 * _base_sas_log_info - verbose translation of firmware log info
419 * @ioc: pointer to scsi command object
420 * @log_info: log info
421 *
422 * Return nothing.
423 */
424static void
425_base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
426{
427 union loginfo_type {
428 u32 loginfo;
429 struct {
430 u32 subcode:16;
431 u32 code:8;
432 u32 originator:4;
433 u32 bus_type:4;
434 } dw;
435 };
436 union loginfo_type sas_loginfo;
437 char *originator_str = NULL;
438
439 sas_loginfo.loginfo = log_info;
440 if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
441 return;
442
443 /* eat the loginfos associated with task aborts */
444 if (ioc->ignore_loginfos && (log_info == 30050000 || log_info ==
445 0x31140000 || log_info == 0x31130000))
446 return;
447
448 switch (sas_loginfo.dw.originator) {
449 case 0:
450 originator_str = "IOP";
451 break;
452 case 1:
453 originator_str = "PL";
454 break;
455 case 2:
456 originator_str = "IR";
457 break;
458 }
459
460 printk(MPT2SAS_WARN_FMT "log_info(0x%08x): originator(%s), "
461 "code(0x%02x), sub_code(0x%04x)\n", ioc->name, log_info,
462 originator_str, sas_loginfo.dw.code,
463 sas_loginfo.dw.subcode);
464}
465
466/**
467 * mpt2sas_base_fault_info - verbose translation of firmware FAULT code
468 * @ioc: pointer to scsi command object
469 * @fault_code: fault code
470 *
471 * Return nothing.
472 */
473void
474mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code)
475{
476 printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n",
477 ioc->name, fault_code);
478}
479
480/**
481 * _base_display_reply_info -
482 * @ioc: pointer to scsi command object
483 * @smid: system request message index
484 * @VF_ID: virtual function id
485 * @reply: reply message frame(lower 32bit addr)
486 *
487 * Return nothing.
488 */
489static void
490_base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID,
491 u32 reply)
492{
493 MPI2DefaultReply_t *mpi_reply;
494 u16 ioc_status;
495
496 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
497 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
498#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
499 if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
500 (ioc->logging_level & MPT_DEBUG_REPLY)) {
501 _base_sas_ioc_info(ioc , mpi_reply,
502 mpt2sas_base_get_msg_frame(ioc, smid));
503 }
504#endif
505 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
506 _base_sas_log_info(ioc, le32_to_cpu(mpi_reply->IOCLogInfo));
507}
508
509/**
510 * mpt2sas_base_done - base internal command completion routine
511 * @ioc: pointer to scsi command object
512 * @smid: system request message index
513 * @VF_ID: virtual function id
514 * @reply: reply message frame(lower 32bit addr)
515 *
516 * Return nothing.
517 */
518void
519mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
520{
521 MPI2DefaultReply_t *mpi_reply;
522
523 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
524 if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
525 return;
526
527 if (ioc->base_cmds.status == MPT2_CMD_NOT_USED)
528 return;
529
530 ioc->base_cmds.status |= MPT2_CMD_COMPLETE;
531 if (mpi_reply) {
532 ioc->base_cmds.status |= MPT2_CMD_REPLY_VALID;
533 memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
534 }
535 ioc->base_cmds.status &= ~MPT2_CMD_PENDING;
536 complete(&ioc->base_cmds.done);
537}
538
539/**
540 * _base_async_event - main callback handler for firmware asyn events
541 * @ioc: pointer to scsi command object
542 * @VF_ID: virtual function id
543 * @reply: reply message frame(lower 32bit addr)
544 *
545 * Return nothing.
546 */
547static void
548_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 reply)
549{
550 Mpi2EventNotificationReply_t *mpi_reply;
551 Mpi2EventAckRequest_t *ack_request;
552 u16 smid;
553
554 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
555 if (!mpi_reply)
556 return;
557 if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
558 return;
559#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
560 _base_display_event_data(ioc, mpi_reply);
561#endif
562 if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
563 goto out;
564 smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
565 if (!smid) {
566 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
567 ioc->name, __func__);
568 goto out;
569 }
570
571 ack_request = mpt2sas_base_get_msg_frame(ioc, smid);
572 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
573 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
574 ack_request->Event = mpi_reply->Event;
575 ack_request->EventContext = mpi_reply->EventContext;
576 ack_request->VF_ID = VF_ID;
577 mpt2sas_base_put_smid_default(ioc, smid, VF_ID);
578
579 out:
580
581 /* scsih callback handler */
582 mpt2sas_scsih_event_callback(ioc, VF_ID, reply);
583
584 /* ctl callback handler */
585 mpt2sas_ctl_event_callback(ioc, VF_ID, reply);
586}
587
588/**
589 * _base_mask_interrupts - disable interrupts
590 * @ioc: pointer to scsi command object
591 *
592 * Disabling ResetIRQ, Reply and Doorbell Interrupts
593 *
594 * Return nothing.
595 */
596static void
597_base_mask_interrupts(struct MPT2SAS_ADAPTER *ioc)
598{
599 u32 him_register;
600
601 ioc->mask_interrupts = 1;
602 him_register = readl(&ioc->chip->HostInterruptMask);
603 him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
604 writel(him_register, &ioc->chip->HostInterruptMask);
605 readl(&ioc->chip->HostInterruptMask);
606}
607
608/**
609 * _base_unmask_interrupts - enable interrupts
610 * @ioc: pointer to scsi command object
611 *
612 * Enabling only Reply Interrupts
613 *
614 * Return nothing.
615 */
616static void
617_base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
618{
619 u32 him_register;
620
621 writel(0, &ioc->chip->HostInterruptStatus);
622 him_register = readl(&ioc->chip->HostInterruptMask);
623 him_register &= ~MPI2_HIM_RIM;
624 writel(him_register, &ioc->chip->HostInterruptMask);
625 ioc->mask_interrupts = 0;
626}
627
628/**
629 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
630 * @irq: irq number (not used)
631 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
632 * @r: pt_regs pointer (not used)
633 *
634 * Return IRQ_HANDLE if processed, else IRQ_NONE.
635 */
636static irqreturn_t
637_base_interrupt(int irq, void *bus_id)
638{
639 u32 post_index, post_index_next, completed_cmds;
640 u8 request_desript_type;
641 u16 smid;
642 u8 cb_idx;
643 u32 reply;
644 u8 VF_ID;
645 int i;
646 struct MPT2SAS_ADAPTER *ioc = bus_id;
647
648 if (ioc->mask_interrupts)
649 return IRQ_NONE;
650
651 post_index = ioc->reply_post_host_index;
652 request_desript_type = ioc->reply_post_free[post_index].
653 Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
654 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
655 return IRQ_NONE;
656
657 completed_cmds = 0;
658 do {
659 if (ioc->reply_post_free[post_index].Words == ~0ULL)
660 goto out;
661 reply = 0;
662 cb_idx = 0xFF;
663 smid = le16_to_cpu(ioc->reply_post_free[post_index].
664 Default.DescriptorTypeDependent1);
665 VF_ID = ioc->reply_post_free[post_index].
666 Default.VF_ID;
667 if (request_desript_type ==
668 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
669 reply = le32_to_cpu(ioc->reply_post_free[post_index].
670 AddressReply.ReplyFrameAddress);
671 } else if (request_desript_type ==
672 MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
673 goto next;
674 else if (request_desript_type ==
675 MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS)
676 goto next;
677 if (smid)
678 cb_idx = ioc->scsi_lookup[smid - 1].cb_idx;
679 if (smid && cb_idx != 0xFF) {
680 mpt_callbacks[cb_idx](ioc, smid, VF_ID, reply);
681 if (reply)
682 _base_display_reply_info(ioc, smid, VF_ID,
683 reply);
684 mpt2sas_base_free_smid(ioc, smid);
685 }
686 if (!smid)
687 _base_async_event(ioc, VF_ID, reply);
688
689 /* reply free queue handling */
690 if (reply) {
691 ioc->reply_free_host_index =
692 (ioc->reply_free_host_index ==
693 (ioc->reply_free_queue_depth - 1)) ?
694 0 : ioc->reply_free_host_index + 1;
695 ioc->reply_free[ioc->reply_free_host_index] =
696 cpu_to_le32(reply);
697 writel(ioc->reply_free_host_index,
698 &ioc->chip->ReplyFreeHostIndex);
699 wmb();
700 }
701
702 next:
703 post_index_next = (post_index == (ioc->reply_post_queue_depth -
704 1)) ? 0 : post_index + 1;
705 request_desript_type =
706 ioc->reply_post_free[post_index_next].Default.ReplyFlags
707 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
708 completed_cmds++;
709 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
710 goto out;
711 post_index = post_index_next;
712 } while (1);
713
714 out:
715
716 if (!completed_cmds)
717 return IRQ_NONE;
718
719 /* reply post descriptor handling */
720 post_index_next = ioc->reply_post_host_index;
721 for (i = 0 ; i < completed_cmds; i++) {
722 post_index = post_index_next;
723 /* poison the reply post descriptor */
724 ioc->reply_post_free[post_index_next].Words = ~0ULL;
725 post_index_next = (post_index ==
726 (ioc->reply_post_queue_depth - 1))
727 ? 0 : post_index + 1;
728 }
729 ioc->reply_post_host_index = post_index_next;
730 writel(post_index_next, &ioc->chip->ReplyPostHostIndex);
731 wmb();
732 return IRQ_HANDLED;
733}
734
735/**
736 * mpt2sas_base_release_callback_handler - clear interupt callback handler
737 * @cb_idx: callback index
738 *
739 * Return nothing.
740 */
741void
742mpt2sas_base_release_callback_handler(u8 cb_idx)
743{
744 mpt_callbacks[cb_idx] = NULL;
745}
746
747/**
748 * mpt2sas_base_register_callback_handler - obtain index for the interrupt callback handler
749 * @cb_func: callback function
750 *
751 * Returns cb_func.
752 */
753u8
754mpt2sas_base_register_callback_handler(MPT_CALLBACK cb_func)
755{
756 u8 cb_idx;
757
758 for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
759 if (mpt_callbacks[cb_idx] == NULL)
760 break;
761
762 mpt_callbacks[cb_idx] = cb_func;
763 return cb_idx;
764}
765
766/**
767 * mpt2sas_base_initialize_callback_handler - initialize the interrupt callback handler
768 *
769 * Return nothing.
770 */
771void
772mpt2sas_base_initialize_callback_handler(void)
773{
774 u8 cb_idx;
775
776 for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
777 mpt2sas_base_release_callback_handler(cb_idx);
778}
779
780/**
781 * mpt2sas_base_build_zero_len_sge - build zero length sg entry
782 * @ioc: per adapter object
783 * @paddr: virtual address for SGE
784 *
785 * Create a zero length scatter gather entry to insure the IOCs hardware has
786 * something to use if the target device goes brain dead and tries
787 * to send data even when none is asked for.
788 *
789 * Return nothing.
790 */
791void
792mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr)
793{
794 u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
795 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
796 MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
797 MPI2_SGE_FLAGS_SHIFT);
798 ioc->base_add_sg_single(paddr, flags_length, -1);
799}
800
801/**
802 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
803 * @paddr: virtual address for SGE
804 * @flags_length: SGE flags and data transfer length
805 * @dma_addr: Physical address
806 *
807 * Return nothing.
808 */
809static void
810_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
811{
812 Mpi2SGESimple32_t *sgel = paddr;
813
814 flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
815 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
816 sgel->FlagsLength = cpu_to_le32(flags_length);
817 sgel->Address = cpu_to_le32(dma_addr);
818}
819
820
821/**
822 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
823 * @paddr: virtual address for SGE
824 * @flags_length: SGE flags and data transfer length
825 * @dma_addr: Physical address
826 *
827 * Return nothing.
828 */
829static void
830_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
831{
832 Mpi2SGESimple64_t *sgel = paddr;
833
834 flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
835 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
836 sgel->FlagsLength = cpu_to_le32(flags_length);
837 sgel->Address = cpu_to_le64(dma_addr);
838}
839
840#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
841
842/**
843 * _base_config_dma_addressing - set dma addressing
844 * @ioc: per adapter object
845 * @pdev: PCI device struct
846 *
847 * Returns 0 for success, non-zero for failure.
848 */
849static int
850_base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
851{
852 struct sysinfo s;
853 char *desc = NULL;
854
855 if (sizeof(dma_addr_t) > 4) {
856 const uint64_t required_mask =
857 dma_get_required_mask(&pdev->dev);
858 if ((required_mask > DMA_32BIT_MASK) && !pci_set_dma_mask(pdev,
859 DMA_64BIT_MASK) && !pci_set_consistent_dma_mask(pdev,
860 DMA_64BIT_MASK)) {
861 ioc->base_add_sg_single = &_base_add_sg_single_64;
862 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
863 desc = "64";
864 goto out;
865 }
866 }
867
868 if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)
869 && !pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
870 ioc->base_add_sg_single = &_base_add_sg_single_32;
871 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
872 desc = "32";
873 } else
874 return -ENODEV;
875
876 out:
877 si_meminfo(&s);
878 printk(MPT2SAS_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, "
879 "total mem (%ld kB)\n", ioc->name, desc, convert_to_kb(s.totalram));
880
881 return 0;
882}
883
884/**
885 * _base_save_msix_table - backup msix vector table
886 * @ioc: per adapter object
887 *
888 * This address an errata where diag reset clears out the table
889 */
890static void
891_base_save_msix_table(struct MPT2SAS_ADAPTER *ioc)
892{
893 int i;
894
895 if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
896 return;
897
898 for (i = 0; i < ioc->msix_vector_count; i++)
899 ioc->msix_table_backup[i] = ioc->msix_table[i];
900}
901
902/**
903 * _base_restore_msix_table - this restores the msix vector table
904 * @ioc: per adapter object
905 *
906 */
907static void
908_base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc)
909{
910 int i;
911
912 if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
913 return;
914
915 for (i = 0; i < ioc->msix_vector_count; i++)
916 ioc->msix_table[i] = ioc->msix_table_backup[i];
917}
918
919/**
920 * _base_check_enable_msix - checks MSIX capabable.
921 * @ioc: per adapter object
922 *
923 * Check to see if card is capable of MSIX, and set number
924 * of avaliable msix vectors
925 */
926static int
927_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
928{
929 int base;
930 u16 message_control;
931 u32 msix_table_offset;
932
933 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
934 if (!base) {
935 dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
936 "supported\n", ioc->name));
937 return -EINVAL;
938 }
939
940 /* get msix vector count */
941 pci_read_config_word(ioc->pdev, base + 2, &message_control);
942 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
943
944 /* get msix table */
945 pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset);
946 msix_table_offset &= 0xFFFFFFF8;
947 ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset);
948
949 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
950 "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name,
951 ioc->msix_vector_count, msix_table_offset, ioc->msix_table));
952 return 0;
953}
954
955/**
956 * _base_disable_msix - disables msix
957 * @ioc: per adapter object
958 *
959 */
960static void
961_base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
962{
963 if (ioc->msix_enable) {
964 pci_disable_msix(ioc->pdev);
965 kfree(ioc->msix_table_backup);
966 ioc->msix_table_backup = NULL;
967 ioc->msix_enable = 0;
968 }
969}
970
971/**
972 * _base_enable_msix - enables msix, failback to io_apic
973 * @ioc: per adapter object
974 *
975 */
976static int
977_base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
978{
979 struct msix_entry entries;
980 int r;
981 u8 try_msix = 0;
982
983 if (msix_disable == -1 || msix_disable == 0)
984 try_msix = 1;
985
986 if (!try_msix)
987 goto try_ioapic;
988
989 if (_base_check_enable_msix(ioc) != 0)
990 goto try_ioapic;
991
992 ioc->msix_table_backup = kcalloc(ioc->msix_vector_count,
993 sizeof(u32), GFP_KERNEL);
994 if (!ioc->msix_table_backup) {
995 dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
996 "msix_table_backup failed!!!\n", ioc->name));
997 goto try_ioapic;
998 }
999
1000 memset(&entries, 0, sizeof(struct msix_entry));
1001 r = pci_enable_msix(ioc->pdev, &entries, 1);
1002 if (r) {
1003 dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix "
1004 "failed (r=%d) !!!\n", ioc->name, r));
1005 goto try_ioapic;
1006 }
1007
1008 r = request_irq(entries.vector, _base_interrupt, IRQF_SHARED,
1009 ioc->name, ioc);
1010 if (r) {
1011 dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "unable to allocate "
1012 "interrupt %d !!!\n", ioc->name, entries.vector));
1013 pci_disable_msix(ioc->pdev);
1014 goto try_ioapic;
1015 }
1016
1017 ioc->pci_irq = entries.vector;
1018 ioc->msix_enable = 1;
1019 return 0;
1020
1021/* failback to io_apic interrupt routing */
1022 try_ioapic:
1023
1024 r = request_irq(ioc->pdev->irq, _base_interrupt, IRQF_SHARED,
1025 ioc->name, ioc);
1026 if (r) {
1027 printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
1028 ioc->name, ioc->pdev->irq);
1029 r = -EBUSY;
1030 goto out_fail;
1031 }
1032
1033 ioc->pci_irq = ioc->pdev->irq;
1034 return 0;
1035
1036 out_fail:
1037 return r;
1038}
1039
1040/**
1041 * mpt2sas_base_map_resources - map in controller resources (io/irq/memap)
1042 * @ioc: per adapter object
1043 *
1044 * Returns 0 for success, non-zero for failure.
1045 */
1046int
1047mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1048{
1049 struct pci_dev *pdev = ioc->pdev;
1050 u32 memap_sz;
1051 u32 pio_sz;
1052 int i, r = 0;
1053
1054 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n",
1055 ioc->name, __func__));
1056
1057 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
1058 if (pci_enable_device_mem(pdev)) {
1059 printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: "
1060 "failed\n", ioc->name);
1061 return -ENODEV;
1062 }
1063
1064
1065 if (pci_request_selected_regions(pdev, ioc->bars,
1066 MPT2SAS_DRIVER_NAME)) {
1067 printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: "
1068 "failed\n", ioc->name);
1069 r = -ENODEV;
1070 goto out_fail;
1071 }
1072
1073 pci_set_master(pdev);
1074
1075 if (_base_config_dma_addressing(ioc, pdev) != 0) {
1076 printk(MPT2SAS_WARN_FMT "no suitable DMA mask for %s\n",
1077 ioc->name, pci_name(pdev));
1078 r = -ENODEV;
1079 goto out_fail;
1080 }
1081
1082 for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
1083 if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) {
1084 if (pio_sz)
1085 continue;
1086 ioc->pio_chip = pci_resource_start(pdev, i);
1087 pio_sz = pci_resource_len(pdev, i);
1088 } else {
1089 if (memap_sz)
1090 continue;
1091 ioc->chip_phys = pci_resource_start(pdev, i);
1092 memap_sz = pci_resource_len(pdev, i);
1093 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
1094 if (ioc->chip == NULL) {
1095 printk(MPT2SAS_ERR_FMT "unable to map adapter "
1096 "memory!\n", ioc->name);
1097 r = -EINVAL;
1098 goto out_fail;
1099 }
1100 }
1101 }
1102
1103 pci_set_drvdata(pdev, ioc->shost);
1104 _base_mask_interrupts(ioc);
1105 r = _base_enable_msix(ioc);
1106 if (r)
1107 goto out_fail;
1108
1109 printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
1110 ioc->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
1111 "IO-APIC enabled"), ioc->pci_irq);
1112 printk(MPT2SAS_INFO_FMT "iomem(0x%lx), mapped(0x%p), size(%d)\n",
1113 ioc->name, ioc->chip_phys, ioc->chip, memap_sz);
1114 printk(MPT2SAS_INFO_FMT "ioport(0x%lx), size(%d)\n",
1115 ioc->name, ioc->pio_chip, pio_sz);
1116
1117 return 0;
1118
1119 out_fail:
1120 if (ioc->chip_phys)
1121 iounmap(ioc->chip);
1122 ioc->chip_phys = 0;
1123 ioc->pci_irq = -1;
1124 pci_release_selected_regions(ioc->pdev, ioc->bars);
1125 pci_disable_device(pdev);
1126 pci_set_drvdata(pdev, NULL);
1127 return r;
1128}
1129
1130/**
1131 * mpt2sas_base_get_msg_frame_dma - obtain request mf pointer phys addr
1132 * @ioc: per adapter object
1133 * @smid: system request message index(smid zero is invalid)
1134 *
1135 * Returns phys pointer to message frame.
1136 */
1137dma_addr_t
1138mpt2sas_base_get_msg_frame_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1139{
1140 return ioc->request_dma + (smid * ioc->request_sz);
1141}
1142
1143/**
1144 * mpt2sas_base_get_msg_frame - obtain request mf pointer
1145 * @ioc: per adapter object
1146 * @smid: system request message index(smid zero is invalid)
1147 *
1148 * Returns virt pointer to message frame.
1149 */
1150void *
1151mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1152{
1153 return (void *)(ioc->request + (smid * ioc->request_sz));
1154}
1155
1156/**
1157 * mpt2sas_base_get_sense_buffer - obtain a sense buffer assigned to a mf request
1158 * @ioc: per adapter object
1159 * @smid: system request message index
1160 *
1161 * Returns virt pointer to sense buffer.
1162 */
1163void *
1164mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1165{
1166 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1167}
1168
1169/**
1170 * mpt2sas_base_get_sense_buffer_dma - obtain a sense buffer assigned to a mf request
1171 * @ioc: per adapter object
1172 * @smid: system request message index
1173 *
1174 * Returns phys pointer to sense buffer.
1175 */
1176dma_addr_t
1177mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1178{
1179 return ioc->sense_dma + ((smid - 1) * SCSI_SENSE_BUFFERSIZE);
1180}
1181
1182/**
1183 * mpt2sas_base_get_reply_virt_addr - obtain reply frames virt address
1184 * @ioc: per adapter object
1185 * @phys_addr: lower 32 physical addr of the reply
1186 *
1187 * Converts 32bit lower physical addr into a virt address.
1188 */
1189void *
1190mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr)
1191{
1192 if (!phys_addr)
1193 return NULL;
1194 return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
1195}
1196
1197/**
1198 * mpt2sas_base_get_smid - obtain a free smid
1199 * @ioc: per adapter object
1200 * @cb_idx: callback index
1201 *
1202 * Returns smid (zero is invalid)
1203 */
1204u16
1205mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1206{
1207 unsigned long flags;
1208 struct request_tracker *request;
1209 u16 smid;
1210
1211 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1212 if (list_empty(&ioc->free_list)) {
1213 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1214 printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1215 ioc->name, __func__);
1216 return 0;
1217 }
1218
1219 request = list_entry(ioc->free_list.next,
1220 struct request_tracker, tracker_list);
1221 request->cb_idx = cb_idx;
1222 smid = request->smid;
1223 list_del(&request->tracker_list);
1224 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1225 return smid;
1226}
1227
1228
/**
 * mpt2sas_base_free_smid - put smid back on free_list
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return nothing.
 */
void
mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	/* mark the tracker unused and return it to the free pool */
	ioc->scsi_lookup[smid - 1].cb_idx = 0xFF;
	list_add_tail(&ioc->scsi_lookup[smid - 1].tracker_list,
	    &ioc->free_list);
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	/*
	 * See _wait_for_commands_to_complete() call with regards to this code.
	 */
	if (ioc->shost_recovery && ioc->pending_io_count) {
		/* During host reset, wake the reset waiter when this is the
		 * last outstanding IO.  NOTE(review): the wake_up happens
		 * before the decrement — presumably the waiter re-tests
		 * pending_io_count itself; confirm against
		 * _wait_for_commands_to_complete().
		 */
		if (ioc->pending_io_count == 1)
			wake_up(&ioc->reset_wq);
		ioc->pending_io_count--;
	}
}
1256
/**
 * _base_writeq - 64 bit write to MMIO
 * @ioc: per adapter object
 * @b: data payload
 * @addr: address in MMIO space
 * @writeq_lock: spin lock
 *
 * Glue for handling an atomic 64 bit word to MMIO. This special handling
 * takes care of 32 bit environments where it is not guaranteed that the
 * entire word is sent in one transfer.
 */
#ifndef writeq
/* writeq not provided by the arch: emulate it with two 32-bit writes under
 * a spinlock so the low/high halves are never interleaved with a concurrent
 * descriptor post.
 */
static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
    spinlock_t *writeq_lock)
{
	unsigned long flags;
	/* NOTE(review): writel() performs its own CPU-to-LE conversion, so
	 * applying cpu_to_le64() here looks like a double swap on big-endian
	 * hosts; it is a no-op on little-endian — confirm intended
	 * endianness.
	 */
	__u64 data_out = cpu_to_le64(b);

	spin_lock_irqsave(writeq_lock, flags);
	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
	spin_unlock_irqrestore(writeq_lock, flags);
}
#else
static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
    spinlock_t *writeq_lock)
{
	/* single atomic 64-bit MMIO write; the lock is unused here */
	writeq(cpu_to_le64(b), addr);
}
#endif
1287
1288/**
1289 * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
1290 * @ioc: per adapter object
1291 * @smid: system request message index
1292 * @vf_id: virtual function id
1293 * @handle: device handle
1294 *
1295 * Return nothing.
1296 */
1297void
1298mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 vf_id,
1299 u16 handle)
1300{
1301 Mpi2RequestDescriptorUnion_t descriptor;
1302 u64 *request = (u64 *)&descriptor;
1303
1304
1305 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1306 descriptor.SCSIIO.VF_ID = vf_id;
1307 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
1308 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
1309 descriptor.SCSIIO.LMID = 0;
1310 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1311 &ioc->scsi_lookup_lock);
1312}
1313
1314
1315/**
1316 * mpt2sas_base_put_smid_hi_priority - send Task Managment request to firmware
1317 * @ioc: per adapter object
1318 * @smid: system request message index
1319 * @vf_id: virtual function id
1320 *
1321 * Return nothing.
1322 */
1323void
1324mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid,
1325 u8 vf_id)
1326{
1327 Mpi2RequestDescriptorUnion_t descriptor;
1328 u64 *request = (u64 *)&descriptor;
1329
1330 descriptor.HighPriority.RequestFlags =
1331 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1332 descriptor.HighPriority.VF_ID = vf_id;
1333 descriptor.HighPriority.SMID = cpu_to_le16(smid);
1334 descriptor.HighPriority.LMID = 0;
1335 descriptor.HighPriority.Reserved1 = 0;
1336 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1337 &ioc->scsi_lookup_lock);
1338}
1339
1340/**
1341 * mpt2sas_base_put_smid_default - Default, primarily used for config pages
1342 * @ioc: per adapter object
1343 * @smid: system request message index
1344 * @vf_id: virtual function id
1345 *
1346 * Return nothing.
1347 */
1348void
1349mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 vf_id)
1350{
1351 Mpi2RequestDescriptorUnion_t descriptor;
1352 u64 *request = (u64 *)&descriptor;
1353
1354 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
1355 descriptor.Default.VF_ID = vf_id;
1356 descriptor.Default.SMID = cpu_to_le16(smid);
1357 descriptor.Default.LMID = 0;
1358 descriptor.Default.DescriptorTypeDependent = 0;
1359 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1360 &ioc->scsi_lookup_lock);
1361}
1362
1363/**
1364 * mpt2sas_base_put_smid_target_assist - send Target Assist/Status to firmware
1365 * @ioc: per adapter object
1366 * @smid: system request message index
1367 * @vf_id: virtual function id
1368 * @io_index: value used to track the IO
1369 *
1370 * Return nothing.
1371 */
1372void
1373mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
1374 u8 vf_id, u16 io_index)
1375{
1376 Mpi2RequestDescriptorUnion_t descriptor;
1377 u64 *request = (u64 *)&descriptor;
1378
1379 descriptor.SCSITarget.RequestFlags =
1380 MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
1381 descriptor.SCSITarget.VF_ID = vf_id;
1382 descriptor.SCSITarget.SMID = cpu_to_le16(smid);
1383 descriptor.SCSITarget.LMID = 0;
1384 descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
1385 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1386 &ioc->scsi_lookup_lock);
1387}
1388
1389/**
1390 * _base_display_ioc_capabilities - Disply IOC's capabilities.
1391 * @ioc: per adapter object
1392 *
1393 * Return nothing.
1394 */
1395static void
1396_base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
1397{
1398 int i = 0;
1399 char desc[16];
1400 u8 revision;
1401 u32 iounit_pg1_flags;
1402
1403 pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
1404 strncpy(desc, ioc->manu_pg0.ChipName, 16);
1405 printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
1406 "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
1407 ioc->name, desc,
1408 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
1409 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
1410 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
1411 ioc->facts.FWVersion.Word & 0x000000FF,
1412 revision,
1413 (ioc->bios_pg3.BiosVersion & 0xFF000000) >> 24,
1414 (ioc->bios_pg3.BiosVersion & 0x00FF0000) >> 16,
1415 (ioc->bios_pg3.BiosVersion & 0x0000FF00) >> 8,
1416 ioc->bios_pg3.BiosVersion & 0x000000FF);
1417
1418 printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
1419
1420 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
1421 printk("Initiator");
1422 i++;
1423 }
1424
1425 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
1426 printk("%sTarget", i ? "," : "");
1427 i++;
1428 }
1429
1430 i = 0;
1431 printk("), ");
1432 printk("Capabilities=(");
1433
1434 if (ioc->facts.IOCCapabilities &
1435 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
1436 printk("Raid");
1437 i++;
1438 }
1439
1440 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
1441 printk("%sTLR", i ? "," : "");
1442 i++;
1443 }
1444
1445 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
1446 printk("%sMulticast", i ? "," : "");
1447 i++;
1448 }
1449
1450 if (ioc->facts.IOCCapabilities &
1451 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
1452 printk("%sBIDI Target", i ? "," : "");
1453 i++;
1454 }
1455
1456 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
1457 printk("%sEEDP", i ? "," : "");
1458 i++;
1459 }
1460
1461 if (ioc->facts.IOCCapabilities &
1462 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
1463 printk("%sSnapshot Buffer", i ? "," : "");
1464 i++;
1465 }
1466
1467 if (ioc->facts.IOCCapabilities &
1468 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
1469 printk("%sDiag Trace Buffer", i ? "," : "");
1470 i++;
1471 }
1472
1473 if (ioc->facts.IOCCapabilities &
1474 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
1475 printk("%sTask Set Full", i ? "," : "");
1476 i++;
1477 }
1478
1479 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
1480 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
1481 printk("%sNCQ", i ? "," : "");
1482 i++;
1483 }
1484
1485 printk(")\n");
1486}
1487
/**
 * _base_static_config_pages - static start of day config pages
 * @ioc: per adapter object
 *
 * Fetches the config pages that are cached in the ioc structure
 * (manufacturing, bios, ioc, io unit), displays the controller's
 * capabilities, then rewrites io unit page 1 so task-set-full handling
 * matches what the IOC facts report as supported.
 *
 * Return nothing.
 */
static void
_base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
{
	Mpi2ConfigReply_t mpi_reply;
	u32 iounit_pg1_flags;

	/* cache static pages into ioc->*; _base_display_ioc_capabilities()
	 * reads manu_pg0/bios_pg3/iounit_pg1, so it must run after these
	 */
	mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
	mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
	mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
	mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
	mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
	mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
	_base_display_ioc_capabilities(ioc);

	/*
	 * Enable task_set_full handling in iounit_pg1 when the
	 * facts capabilities indicate that its supported.
	 */
	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if ((ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
		iounit_pg1_flags &=
		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	else
		iounit_pg1_flags |=
		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	/* write the adjusted flags back to the controller */
	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
	mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, ioc->iounit_pg1);
}
1523
1524/**
1525 * _base_release_memory_pools - release memory
1526 * @ioc: per adapter object
1527 *
1528 * Free memory allocated from _base_allocate_memory_pools.
1529 *
1530 * Return nothing.
1531 */
1532static void
1533_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
1534{
1535 dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
1536 __func__));
1537
1538 if (ioc->request) {
1539 pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
1540 ioc->request, ioc->request_dma);
1541 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "request_pool(0x%p)"
1542 ": free\n", ioc->name, ioc->request));
1543 ioc->request = NULL;
1544 }
1545
1546 if (ioc->sense) {
1547 pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
1548 if (ioc->sense_dma_pool)
1549 pci_pool_destroy(ioc->sense_dma_pool);
1550 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_pool(0x%p)"
1551 ": free\n", ioc->name, ioc->sense));
1552 ioc->sense = NULL;
1553 }
1554
1555 if (ioc->reply) {
1556 pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
1557 if (ioc->reply_dma_pool)
1558 pci_pool_destroy(ioc->reply_dma_pool);
1559 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_pool(0x%p)"
1560 ": free\n", ioc->name, ioc->reply));
1561 ioc->reply = NULL;
1562 }
1563
1564 if (ioc->reply_free) {
1565 pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
1566 ioc->reply_free_dma);
1567 if (ioc->reply_free_dma_pool)
1568 pci_pool_destroy(ioc->reply_free_dma_pool);
1569 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_pool"
1570 "(0x%p): free\n", ioc->name, ioc->reply_free));
1571 ioc->reply_free = NULL;
1572 }
1573
1574 if (ioc->reply_post_free) {
1575 pci_pool_free(ioc->reply_post_free_dma_pool,
1576 ioc->reply_post_free, ioc->reply_post_free_dma);
1577 if (ioc->reply_post_free_dma_pool)
1578 pci_pool_destroy(ioc->reply_post_free_dma_pool);
1579 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
1580 "reply_post_free_pool(0x%p): free\n", ioc->name,
1581 ioc->reply_post_free));
1582 ioc->reply_post_free = NULL;
1583 }
1584
1585 if (ioc->config_page) {
1586 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
1587 "config_page(0x%p): free\n", ioc->name,
1588 ioc->config_page));
1589 pci_free_consistent(ioc->pdev, ioc->config_page_sz,
1590 ioc->config_page, ioc->config_page_dma);
1591 }
1592
1593 kfree(ioc->scsi_lookup);
1594}
1595
1596
/**
 * _base_allocate_memory_pools - allocate start of day memory pools
 * @ioc: per adapter object
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Sizes the request/reply queues from the IOC facts (capped by the
 * max_sgl_entries and max_queue_depth module parameters), then allocates
 * the DMA pools backing them: request+chain frames, the smid lookup
 * table, sense buffers, reply frames, the reply free queue, the reply
 * descriptor post queue, and a scratch config page.  If the contiguous
 * request pool cannot be allocated, the queue depth is shrunk and the
 * sizing is redone (see the retry_allocation label).
 *
 * Returns 0 success, anything else error
 */
static int
_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
{
	Mpi2IOCFactsReply_t *facts;
	u32 queue_size, queue_diff;
	u16 max_sge_elements;
	u16 num_of_reply_frames;
	u16 chains_needed_per_io;
	u32 sz, total_sz;
	u16 i;
	u32 retry_sz;
	u16 max_request_credit;

	dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
	    __func__));

	retry_sz = 0;
	facts = &ioc->facts;

	/* command line tunables for max sgl entries */
	if (max_sgl_entries != -1) {
		ioc->shost->sg_tablesize = (max_sgl_entries <
		    MPT2SAS_SG_DEPTH) ? max_sgl_entries :
		    MPT2SAS_SG_DEPTH;
	} else {
		ioc->shost->sg_tablesize = MPT2SAS_SG_DEPTH;
	}

	/* command line tunables for max controller queue depth,
	 * never exceeding the credit the firmware reports
	 */
	if (max_queue_depth != -1) {
		max_request_credit = (max_queue_depth < facts->RequestCredit)
		    ? max_queue_depth : facts->RequestCredit;
	} else {
		max_request_credit = (facts->RequestCredit >
		    MPT2SAS_MAX_REQUEST_QUEUE) ? MPT2SAS_MAX_REQUEST_QUEUE :
		    facts->RequestCredit;
	}
	ioc->request_depth = max_request_credit;

	/* request frame size (facts value is in 32-bit words) */
	ioc->request_sz = facts->IOCRequestFrameSize * 4;

	/* reply frame size (facts value is in 32-bit words) */
	ioc->reply_sz = facts->ReplyFrameSize * 4;

	/* re-entered with a smaller ioc->request_depth when the request
	 * pool allocation below fails
	 */
 retry_allocation:
	total_sz = 0;
	/* calculate number of sg elements left over in the 1st frame */
	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
	    sizeof(Mpi2SGEIOUnion_t)) + ioc->sge_size);
	ioc->max_sges_in_main_message = max_sge_elements/ioc->sge_size;

	/* now do the same for a chain buffer */
	max_sge_elements = ioc->request_sz - ioc->sge_size;
	ioc->max_sges_in_chain_message = max_sge_elements/ioc->sge_size;

	/* chain offset is expressed in 32-bit words, hence the /4 */
	ioc->chain_offset_value_for_main_message =
	    ((sizeof(Mpi2SCSIIORequest_t) - sizeof(Mpi2SGEIOUnion_t)) +
	    (ioc->max_sges_in_chain_message * ioc->sge_size)) / 4;

	/*
	 * MPT2SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
	 */
	chains_needed_per_io = ((ioc->shost->sg_tablesize -
	    ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
	    + 1;
	if (chains_needed_per_io > facts->MaxChainDepth) {
		/* firmware cannot chain that deep: clamp the chain count and
		 * shrink sg_tablesize to what the clamped chains can carry
		 */
		chains_needed_per_io = facts->MaxChainDepth;
		ioc->shost->sg_tablesize = min_t(u16,
		    ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
		    * chains_needed_per_io), ioc->shost->sg_tablesize);
	}
	ioc->chains_needed_per_io = chains_needed_per_io;

	/* reply free queue sizing - taking into account for events */
	num_of_reply_frames = ioc->request_depth + 32;

	/* number of replies frames can't be a multiple of 16 */
	/* decrease number of reply frames by 1 */
	if (!(num_of_reply_frames % 16))
		num_of_reply_frames--;

	/* calculate number of reply free queue entries
	 * (must be multiple of 16)
	 */

	/* (we know reply_free_queue_depth is not a multiple of 16) */
	queue_size = num_of_reply_frames;
	queue_size += 16 - (queue_size % 16);
	ioc->reply_free_queue_depth = queue_size;

	/* reply descriptor post queue sizing */
	/* this size should be the number of request frames + number of reply
	 * frames
	 */

	queue_size = ioc->request_depth + num_of_reply_frames + 1;
	/* round up to 16 byte boundary */
	if (queue_size % 16)
		queue_size += 16 - (queue_size % 16);

	/* check against IOC maximum reply post queue depth */
	if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
		queue_diff = queue_size -
		    facts->MaxReplyDescriptorPostQueueDepth;

		/* round queue_diff up to multiple of 16 */
		if (queue_diff % 16)
			queue_diff += 16 - (queue_diff % 16);

		/* adjust request_depth, reply_free_queue_depth,
		 * and queue_size
		 */
		ioc->request_depth -= queue_diff;
		ioc->reply_free_queue_depth -= queue_diff;
		queue_size -= queue_diff;
	}
	ioc->reply_post_queue_depth = queue_size;

	/* max scsi host queue depth: reserve frames for internal commands */
	ioc->shost->can_queue = ioc->request_depth - INTERNAL_CMDS_COUNT;
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host queue: depth"
	    "(%d)\n", ioc->name, ioc->shost->can_queue));

	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
	    ioc->chains_needed_per_io));

	/* contiguous pool for request and chains, 16 byte align, one extra
	 * frame for smid=0
	 */
	ioc->chain_depth = ioc->chains_needed_per_io * ioc->request_depth;
	sz = ((ioc->request_depth + 1 + ioc->chain_depth) * ioc->request_sz);

	ioc->request_dma_sz = sz;
	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
	if (!ioc->request) {
		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
		    "failed: req_depth(%d), chains_per_io(%d), frame_sz(%d), "
		    "total(%d kB)\n", ioc->name, ioc->request_depth,
		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
		/* shrink the depth by 64 per attempt and resize everything,
		 * but give up below the minimum usable queue depth
		 */
		if (ioc->request_depth < MPT2SAS_SAS_QUEUE_DEPTH)
			goto out;
		retry_sz += 64;
		ioc->request_depth = max_request_credit - retry_sz;
		goto retry_allocation;
	}

	if (retry_sz)
		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
		    "succeed: req_depth(%d), chains_per_io(%d), frame_sz(%d), "
		    "total(%d kb)\n", ioc->name, ioc->request_depth,
		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);

	/* chain frames live immediately after the request frames (including
	 * the extra smid=0 frame) within the same contiguous pool
	 */
	ioc->chain = ioc->request + ((ioc->request_depth + 1) *
	    ioc->request_sz);
	ioc->chain_dma = ioc->request_dma + ((ioc->request_depth + 1) *
	    ioc->request_sz);
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): "
	    "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
	    ioc->request, ioc->request_depth, ioc->request_sz,
	    ((ioc->request_depth + 1) * ioc->request_sz)/1024));
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool(0x%p): depth"
	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->chain,
	    ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth *
	    ioc->request_sz))/1024));
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool: dma(0x%llx)\n",
	    ioc->name, (unsigned long long) ioc->request_dma));
	total_sz += sz;

	/* per-smid tracker table used by get_smid/free_smid */
	ioc->scsi_lookup = kcalloc(ioc->request_depth,
	    sizeof(struct request_tracker), GFP_KERNEL);
	if (!ioc->scsi_lookup) {
		printk(MPT2SAS_ERR_FMT "scsi_lookup: kcalloc failed\n",
		    ioc->name);
		goto out;
	}

	/* initialize some bits */
	for (i = 0; i < ioc->request_depth; i++)
		ioc->scsi_lookup[i].smid = i + 1;

	/* sense buffers, 4 byte align */
	sz = ioc->request_depth * SCSI_SENSE_BUFFERSIZE;
	ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
	    0);
	if (!ioc->sense_dma_pool) {
		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_create failed\n",
		    ioc->name);
		goto out;
	}
	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
	    &ioc->sense_dma);
	if (!ioc->sense) {
		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_alloc failed\n",
		    ioc->name);
		goto out;
	}
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
	    "(%d kB)\n", ioc->name, ioc->sense, ioc->request_depth,
	    SCSI_SENSE_BUFFERSIZE, sz/1024));
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_dma(0x%llx)\n",
	    ioc->name, (unsigned long long)ioc->sense_dma));
	total_sz += sz;

	/* reply pool, 4 byte align */
	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
	ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
	    0);
	if (!ioc->reply_dma_pool) {
		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_create failed\n",
		    ioc->name);
		goto out;
	}
	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
	    &ioc->reply_dma);
	if (!ioc->reply) {
		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_alloc failed\n",
		    ioc->name);
		goto out;
	}
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply pool(0x%p): depth"
	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply,
	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_dma(0x%llx)\n",
	    ioc->name, (unsigned long long)ioc->reply_dma));
	total_sz += sz;

	/* reply free queue, 16 byte align */
	sz = ioc->reply_free_queue_depth * 4;
	ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
	    ioc->pdev, sz, 16, 0);
	if (!ioc->reply_free_dma_pool) {
		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_create "
		    "failed\n", ioc->name);
		goto out;
	}
	ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
	    &ioc->reply_free_dma);
	if (!ioc->reply_free) {
		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_alloc "
		    "failed\n", ioc->name);
		goto out;
	}
	memset(ioc->reply_free, 0, sz);
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free pool(0x%p): "
	    "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_dma"
	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma));
	total_sz += sz;

	/* reply post queue, 16 byte align */
	sz = ioc->reply_post_queue_depth * sizeof(Mpi2DefaultReplyDescriptor_t);
	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
	    ioc->pdev, sz, 16, 0);
	if (!ioc->reply_post_free_dma_pool) {
		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_create "
		    "failed\n", ioc->name);
		goto out;
	}
	ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
	    GFP_KERNEL, &ioc->reply_post_free_dma);
	if (!ioc->reply_post_free) {
		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_alloc "
		    "failed\n", ioc->name);
		goto out;
	}
	memset(ioc->reply_post_free, 0, sz);
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply post free pool"
	    "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
	    ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
	    sz/1024));
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_post_free_dma = "
	    "(0x%llx)\n", ioc->name, (unsigned long long)
	    ioc->reply_post_free_dma));
	total_sz += sz;

	/* scratch buffer for config page transactions */
	ioc->config_page_sz = 512;
	ioc->config_page = pci_alloc_consistent(ioc->pdev,
	    ioc->config_page_sz, &ioc->config_page_dma);
	if (!ioc->config_page) {
		printk(MPT2SAS_ERR_FMT "config page: pci_pool_alloc "
		    "failed\n", ioc->name);
		goto out;
	}
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config page(0x%p): size"
	    "(%d)\n", ioc->name, ioc->config_page, ioc->config_page_sz));
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config_page_dma"
	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->config_page_dma));
	total_sz += ioc->config_page_sz;

	printk(MPT2SAS_INFO_FMT "Allocated physical memory: size(%d kB)\n",
	    ioc->name, total_sz/1024);
	printk(MPT2SAS_INFO_FMT "Current Controller Queue Depth(%d), "
	    "Max Controller Queue Depth(%d)\n",
	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
	printk(MPT2SAS_INFO_FMT "Scatter Gather Elements per IO(%d)\n",
	    ioc->name, ioc->shost->sg_tablesize);
	return 0;

	/* undo every allocation made so far */
 out:
	_base_release_memory_pools(ioc);
	return -ENOMEM;
}
1912
1913
1914/**
1915 * mpt2sas_base_get_iocstate - Get the current state of a MPT adapter.
1916 * @ioc: Pointer to MPT_ADAPTER structure
1917 * @cooked: Request raw or cooked IOC state
1918 *
1919 * Returns all IOC Doorbell register bits if cooked==0, else just the
1920 * Doorbell bits in MPI_IOC_STATE_MASK.
1921 */
1922u32
1923mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked)
1924{
1925 u32 s, sc;
1926
1927 s = readl(&ioc->chip->Doorbell);
1928 sc = s & MPI2_IOC_STATE_MASK;
1929 return cooked ? sc : s;
1930}
1931
/**
 * _base_wait_on_iocstate - waiting on a particular ioc state
 * @ioc: per adapter object
 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
 * @timeout: timeout in second
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Returns 0 for success, non-zero for failure (the last doorbell state
 * observed when the wait gave up).
 */
static int
_base_wait_on_iocstate(struct MPT2SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
    int sleep_flag)
{
	u32 count, cntdn;
	u32 current_state;

	count = 0;
	/* 1ms polls when sleeping is allowed, 500us busy-waits otherwise */
	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
	do {
		current_state = mpt2sas_base_get_iocstate(ioc, 1);
		if (current_state == ioc_state)
			return 0;
		/* a FAULT seen after the first poll will not clear on its
		 * own; stop waiting and report it
		 */
		if (count && current_state == MPI2_IOC_STATE_FAULT)
			break;
		if (sleep_flag == CAN_SLEEP)
			msleep(1);
		else
			udelay(500);
		count++;
	} while (--cntdn);

	return current_state;
}
1964
/**
 * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
 * a write to the doorbell)
 * @ioc: per adapter object
 * @timeout: timeout in second
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Returns 0 for success, non-zero for failure.
 *
 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
 */
static int
_base_wait_for_doorbell_int(struct MPT2SAS_ADAPTER *ioc, int timeout,
    int sleep_flag)
{
	u32 cntdn, count;
	u32 int_status;

	count = 0;
	/* 1ms polls when sleeping is allowed, 500us busy-waits otherwise */
	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
	do {
		int_status = readl(&ioc->chip->HostInterruptStatus);
		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
			    "successfull count(%d), timeout(%d)\n", ioc->name,
			    __func__, count, timeout));
			return 0;
		}
		if (sleep_flag == CAN_SLEEP)
			msleep(1);
		else
			udelay(500);
		count++;
	} while (--cntdn);

	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
	    "int_status(%x)!\n", ioc->name, __func__, count, int_status);
	return -EFAULT;
}
2004
2005/**
2006 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
2007 * @ioc: per adapter object
2008 * @timeout: timeout in second
2009 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2010 *
2011 * Returns 0 for success, non-zero for failure.
2012 *
2013 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
2014 * doorbell.
2015 */
2016static int
2017_base_wait_for_doorbell_ack(struct MPT2SAS_ADAPTER *ioc, int timeout,
2018 int sleep_flag)
2019{
2020 u32 cntdn, count;
2021 u32 int_status;
2022 u32 doorbell;
2023
2024 count = 0;
2025 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2026 do {
2027 int_status = readl(&ioc->chip->HostInterruptStatus);
2028 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
2029 dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
2030 "successfull count(%d), timeout(%d)\n", ioc->name,
2031 __func__, count, timeout));
2032 return 0;
2033 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
2034 doorbell = readl(&ioc->chip->Doorbell);
2035 if ((doorbell & MPI2_IOC_STATE_MASK) ==
2036 MPI2_IOC_STATE_FAULT) {
2037 mpt2sas_base_fault_info(ioc , doorbell);
2038 return -EFAULT;
2039 }
2040 } else if (int_status == 0xFFFFFFFF)
2041 goto out;
2042
2043 if (sleep_flag == CAN_SLEEP)
2044 msleep(1);
2045 else
2046 udelay(500);
2047 count++;
2048 } while (--cntdn);
2049
2050 out:
2051 printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2052 "int_status(%x)!\n", ioc->name, __func__, count, int_status);
2053 return -EFAULT;
2054}
2055
2056/**
2057 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
2058 * @ioc: per adapter object
2059 * @timeout: timeout in second
2060 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2061 *
2062 * Returns 0 for success, non-zero for failure.
2063 *
2064 */
2065static int
2066_base_wait_for_doorbell_not_used(struct MPT2SAS_ADAPTER *ioc, int timeout,
2067 int sleep_flag)
2068{
2069 u32 cntdn, count;
2070 u32 doorbell_reg;
2071
2072 count = 0;
2073 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2074 do {
2075 doorbell_reg = readl(&ioc->chip->Doorbell);
2076 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
2077 dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
2078 "successfull count(%d), timeout(%d)\n", ioc->name,
2079 __func__, count, timeout));
2080 return 0;
2081 }
2082 if (sleep_flag == CAN_SLEEP)
2083 msleep(1);
2084 else
2085 udelay(500);
2086 count++;
2087 } while (--cntdn);
2088
2089 printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2090 "doorbell_reg(%x)!\n", ioc->name, __func__, count, doorbell_reg);
2091 return -EFAULT;
2092}
2093
2094/**
2095 * _base_send_ioc_reset - send doorbell reset
2096 * @ioc: per adapter object
2097 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
2098 * @timeout: timeout in second
2099 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2100 *
2101 * Returns 0 for success, non-zero for failure.
2102 */
2103static int
2104_base_send_ioc_reset(struct MPT2SAS_ADAPTER *ioc, u8 reset_type, int timeout,
2105 int sleep_flag)
2106{
2107 u32 ioc_state;
2108 int r = 0;
2109
2110 if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
2111 printk(MPT2SAS_ERR_FMT "%s: unknown reset_type\n",
2112 ioc->name, __func__);
2113 return -EFAULT;
2114 }
2115
2116 if (!(ioc->facts.IOCCapabilities &
2117 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
2118 return -EFAULT;
2119
2120 printk(MPT2SAS_INFO_FMT "sending message unit reset !!\n", ioc->name);
2121
2122 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
2123 &ioc->chip->Doorbell);
2124 if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
2125 r = -EFAULT;
2126 goto out;
2127 }
2128 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
2129 timeout, sleep_flag);
2130 if (ioc_state) {
2131 printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
2132 " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
2133 r = -EFAULT;
2134 goto out;
2135 }
2136 out:
2137 printk(MPT2SAS_INFO_FMT "message unit reset: %s\n",
2138 ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
2139 return r;
2140}
2141
2142/**
2143 * _base_handshake_req_reply_wait - send request thru doorbell interface
2144 * @ioc: per adapter object
2145 * @request_bytes: request length
2146 * @request: pointer having request payload
2147 * @reply_bytes: reply length
2148 * @reply: pointer to reply payload
2149 * @timeout: timeout in second
2150 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2151 *
2152 * Returns 0 for success, non-zero for failure.
2153 */
2154static int
2155_base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
2156 u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
2157{
2158 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
2159 int i;
2160 u8 failed;
2161 u16 dummy;
2162 u32 *mfp;
2163
2164 /* make sure doorbell is not in use */
2165 if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
2166 printk(MPT2SAS_ERR_FMT "doorbell is in use "
2167 " (line=%d)\n", ioc->name, __LINE__);
2168 return -EFAULT;
2169 }
2170
2171 /* clear pending doorbell interrupts from previous state changes */
2172 if (readl(&ioc->chip->HostInterruptStatus) &
2173 MPI2_HIS_IOC2SYS_DB_STATUS)
2174 writel(0, &ioc->chip->HostInterruptStatus);
2175
2176 /* send message to ioc */
2177 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
2178 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
2179 &ioc->chip->Doorbell);
2180
2181 if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
2182 printk(MPT2SAS_ERR_FMT "doorbell handshake "
2183 "int failed (line=%d)\n", ioc->name, __LINE__);
2184 return -EFAULT;
2185 }
2186 writel(0, &ioc->chip->HostInterruptStatus);
2187
2188 if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
2189 printk(MPT2SAS_ERR_FMT "doorbell handshake "
2190 "ack failed (line=%d)\n", ioc->name, __LINE__);
2191 return -EFAULT;
2192 }
2193
2194 /* send message 32-bits at a time */
2195 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
2196 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
2197 if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
2198 failed = 1;
2199 }
2200
2201 if (failed) {
2202 printk(MPT2SAS_ERR_FMT "doorbell handshake "
2203 "sending request failed (line=%d)\n", ioc->name, __LINE__);
2204 return -EFAULT;
2205 }
2206
2207 /* now wait for the reply */
2208 if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
2209 printk(MPT2SAS_ERR_FMT "doorbell handshake "
2210 "int failed (line=%d)\n", ioc->name, __LINE__);
2211 return -EFAULT;
2212 }
2213
2214 /* read the first two 16-bits, it gives the total length of the reply */
2215 reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
2216 & MPI2_DOORBELL_DATA_MASK);
2217 writel(0, &ioc->chip->HostInterruptStatus);
2218 if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
2219 printk(MPT2SAS_ERR_FMT "doorbell handshake "
2220 "int failed (line=%d)\n", ioc->name, __LINE__);
2221 return -EFAULT;
2222 }
2223 reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
2224 & MPI2_DOORBELL_DATA_MASK);
2225 writel(0, &ioc->chip->HostInterruptStatus);
2226
2227 for (i = 2; i < default_reply->MsgLength * 2; i++) {
2228 if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
2229 printk(MPT2SAS_ERR_FMT "doorbell "
2230 "handshake int failed (line=%d)\n", ioc->name,
2231 __LINE__);
2232 return -EFAULT;
2233 }
2234 if (i >= reply_bytes/2) /* overflow case */
2235 dummy = readl(&ioc->chip->Doorbell);
2236 else
2237 reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
2238 & MPI2_DOORBELL_DATA_MASK);
2239 writel(0, &ioc->chip->HostInterruptStatus);
2240 }
2241
2242 _base_wait_for_doorbell_int(ioc, 5, sleep_flag);
2243 if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
2244 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "doorbell is in use "
2245 " (line=%d)\n", ioc->name, __LINE__));
2246 }
2247 writel(0, &ioc->chip->HostInterruptStatus);
2248
2249 if (ioc->logging_level & MPT_DEBUG_INIT) {
2250 mfp = (u32 *)reply;
2251 printk(KERN_DEBUG "\toffset:data\n");
2252 for (i = 0; i < reply_bytes/4; i++)
2253 printk(KERN_DEBUG "\t[0x%02x]:%08x\n", i*4,
2254 le32_to_cpu(mfp[i]));
2255 }
2256 return 0;
2257}
2258
2259/**
2260 * mpt2sas_base_sas_iounit_control - send sas iounit control to FW
2261 * @ioc: per adapter object
2262 * @mpi_reply: the reply payload from FW
2263 * @mpi_request: the request payload sent to FW
2264 *
2265 * The SAS IO Unit Control Request message allows the host to perform low-level
2266 * operations, such as resets on the PHYs of the IO Unit, also allows the host
2267 * to obtain the IOC assigned device handles for a device if it has other
2268 * identifying information about the device, in addition allows the host to
2269 * remove IOC resources associated with the device.
2270 *
2271 * Returns 0 for success, non-zero for failure.
2272 */
2273int
2274mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
2275 Mpi2SasIoUnitControlReply_t *mpi_reply,
2276 Mpi2SasIoUnitControlRequest_t *mpi_request)
2277{
2278 u16 smid;
2279 u32 ioc_state;
2280 unsigned long timeleft;
2281 u8 issue_reset;
2282 int rc;
2283 void *request;
2284 u16 wait_state_count;
2285
2286 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
2287 __func__));
2288
2289 mutex_lock(&ioc->base_cmds.mutex);
2290
2291 if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
2292 printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
2293 ioc->name, __func__);
2294 rc = -EAGAIN;
2295 goto out;
2296 }
2297
2298 wait_state_count = 0;
2299 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
2300 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
2301 if (wait_state_count++ == 10) {
2302 printk(MPT2SAS_ERR_FMT
2303 "%s: failed due to ioc not operational\n",
2304 ioc->name, __func__);
2305 rc = -EFAULT;
2306 goto out;
2307 }
2308 ssleep(1);
2309 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
2310 printk(MPT2SAS_INFO_FMT "%s: waiting for "
2311 "operational state(count=%d)\n", ioc->name,
2312 __func__, wait_state_count);
2313 }
2314
2315 smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
2316 if (!smid) {
2317 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
2318 ioc->name, __func__);
2319 rc = -EAGAIN;
2320 goto out;
2321 }
2322
2323 rc = 0;
2324 ioc->base_cmds.status = MPT2_CMD_PENDING;
2325 request = mpt2sas_base_get_msg_frame(ioc, smid);
2326 ioc->base_cmds.smid = smid;
2327 memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
2328 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
2329 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
2330 ioc->ioc_link_reset_in_progress = 1;
2331 mpt2sas_base_put_smid_default(ioc, smid, mpi_request->VF_ID);
2332 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
2333 msecs_to_jiffies(10000));
2334 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
2335 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
2336 ioc->ioc_link_reset_in_progress)
2337 ioc->ioc_link_reset_in_progress = 0;
2338 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
2339 printk(MPT2SAS_ERR_FMT "%s: timeout\n",
2340 ioc->name, __func__);
2341 _debug_dump_mf(mpi_request,
2342 sizeof(Mpi2SasIoUnitControlRequest_t)/4);
2343 if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
2344 issue_reset = 1;
2345 goto issue_host_reset;
2346 }
2347 if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
2348 memcpy(mpi_reply, ioc->base_cmds.reply,
2349 sizeof(Mpi2SasIoUnitControlReply_t));
2350 else
2351 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
2352 ioc->base_cmds.status = MPT2_CMD_NOT_USED;
2353 goto out;
2354
2355 issue_host_reset:
2356 if (issue_reset)
2357 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
2358 FORCE_BIG_HAMMER);
2359 ioc->base_cmds.status = MPT2_CMD_NOT_USED;
2360 rc = -EFAULT;
2361 out:
2362 mutex_unlock(&ioc->base_cmds.mutex);
2363 return rc;
2364}
2365
2366
2367/**
2368 * mpt2sas_base_scsi_enclosure_processor - sending request to sep device
2369 * @ioc: per adapter object
2370 * @mpi_reply: the reply payload from FW
2371 * @mpi_request: the request payload sent to FW
2372 *
2373 * The SCSI Enclosure Processor request message causes the IOC to
2374 * communicate with SES devices to control LED status signals.
2375 *
2376 * Returns 0 for success, non-zero for failure.
2377 */
2378int
2379mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
2380 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
2381{
2382 u16 smid;
2383 u32 ioc_state;
2384 unsigned long timeleft;
2385 u8 issue_reset;
2386 int rc;
2387 void *request;
2388 u16 wait_state_count;
2389
2390 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
2391 __func__));
2392
2393 mutex_lock(&ioc->base_cmds.mutex);
2394
2395 if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
2396 printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
2397 ioc->name, __func__);
2398 rc = -EAGAIN;
2399 goto out;
2400 }
2401
2402 wait_state_count = 0;
2403 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
2404 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
2405 if (wait_state_count++ == 10) {
2406 printk(MPT2SAS_ERR_FMT
2407 "%s: failed due to ioc not operational\n",
2408 ioc->name, __func__);
2409 rc = -EFAULT;
2410 goto out;
2411 }
2412 ssleep(1);
2413 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
2414 printk(MPT2SAS_INFO_FMT "%s: waiting for "
2415 "operational state(count=%d)\n", ioc->name,
2416 __func__, wait_state_count);
2417 }
2418
2419 smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
2420 if (!smid) {
2421 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
2422 ioc->name, __func__);
2423 rc = -EAGAIN;
2424 goto out;
2425 }
2426
2427 rc = 0;
2428 ioc->base_cmds.status = MPT2_CMD_PENDING;
2429 request = mpt2sas_base_get_msg_frame(ioc, smid);
2430 ioc->base_cmds.smid = smid;
2431 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
2432 mpt2sas_base_put_smid_default(ioc, smid, mpi_request->VF_ID);
2433 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
2434 msecs_to_jiffies(10000));
2435 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
2436 printk(MPT2SAS_ERR_FMT "%s: timeout\n",
2437 ioc->name, __func__);
2438 _debug_dump_mf(mpi_request,
2439 sizeof(Mpi2SepRequest_t)/4);
2440 if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
2441 issue_reset = 1;
2442 goto issue_host_reset;
2443 }
2444 if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
2445 memcpy(mpi_reply, ioc->base_cmds.reply,
2446 sizeof(Mpi2SepReply_t));
2447 else
2448 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
2449 ioc->base_cmds.status = MPT2_CMD_NOT_USED;
2450 goto out;
2451
2452 issue_host_reset:
2453 if (issue_reset)
2454 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
2455 FORCE_BIG_HAMMER);
2456 ioc->base_cmds.status = MPT2_CMD_NOT_USED;
2457 rc = -EFAULT;
2458 out:
2459 mutex_unlock(&ioc->base_cmds.mutex);
2460 return rc;
2461}
2462
2463/**
2464 * _base_get_port_facts - obtain port facts reply and save in ioc
2465 * @ioc: per adapter object
2466 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2467 *
2468 * Returns 0 for success, non-zero for failure.
2469 */
2470static int
2471_base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
2472{
2473 Mpi2PortFactsRequest_t mpi_request;
2474 Mpi2PortFactsReply_t mpi_reply, *pfacts;
2475 int mpi_reply_sz, mpi_request_sz, r;
2476
2477 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
2478 __func__));
2479
2480 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
2481 mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
2482 memset(&mpi_request, 0, mpi_request_sz);
2483 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
2484 mpi_request.PortNumber = port;
2485 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
2486 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
2487
2488 if (r != 0) {
2489 printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
2490 ioc->name, __func__, r);
2491 return r;
2492 }
2493
2494 pfacts = &ioc->pfacts[port];
2495 memset(pfacts, 0, sizeof(Mpi2PortFactsReply_t));
2496 pfacts->PortNumber = mpi_reply.PortNumber;
2497 pfacts->VP_ID = mpi_reply.VP_ID;
2498 pfacts->VF_ID = mpi_reply.VF_ID;
2499 pfacts->MaxPostedCmdBuffers =
2500 le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
2501
2502 return 0;
2503}
2504
2505/**
2506 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
2507 * @ioc: per adapter object
2508 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2509 *
2510 * Returns 0 for success, non-zero for failure.
2511 */
2512static int
2513_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2514{
2515 Mpi2IOCFactsRequest_t mpi_request;
2516 Mpi2IOCFactsReply_t mpi_reply, *facts;
2517 int mpi_reply_sz, mpi_request_sz, r;
2518
2519 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
2520 __func__));
2521
2522 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
2523 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
2524 memset(&mpi_request, 0, mpi_request_sz);
2525 mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
2526 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
2527 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
2528
2529 if (r != 0) {
2530 printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
2531 ioc->name, __func__, r);
2532 return r;
2533 }
2534
2535 facts = &ioc->facts;
2536 memset(facts, 0, sizeof(Mpi2IOCFactsReply_t));
2537 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
2538 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
2539 facts->VP_ID = mpi_reply.VP_ID;
2540 facts->VF_ID = mpi_reply.VF_ID;
2541 facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
2542 facts->MaxChainDepth = mpi_reply.MaxChainDepth;
2543 facts->WhoInit = mpi_reply.WhoInit;
2544 facts->NumberOfPorts = mpi_reply.NumberOfPorts;
2545 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
2546 facts->MaxReplyDescriptorPostQueueDepth =
2547 le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
2548 facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
2549 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
2550 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
2551 ioc->ir_firmware = 1;
2552 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
2553 facts->IOCRequestFrameSize =
2554 le16_to_cpu(mpi_reply.IOCRequestFrameSize);
2555 facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
2556 facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
2557 ioc->shost->max_id = -1;
2558 facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
2559 facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
2560 facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
2561 facts->HighPriorityCredit =
2562 le16_to_cpu(mpi_reply.HighPriorityCredit);
2563 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
2564 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
2565
2566 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hba queue depth(%d), "
2567 "max chains per io(%d)\n", ioc->name, facts->RequestCredit,
2568 facts->MaxChainDepth));
2569 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request frame size(%d), "
2570 "reply frame size(%d)\n", ioc->name,
2571 facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
2572 return 0;
2573}
2574
2575/**
2576 * _base_send_ioc_init - send ioc_init to firmware
2577 * @ioc: per adapter object
2578 * @VF_ID: virtual function id
2579 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2580 *
2581 * Returns 0 for success, non-zero for failure.
2582 */
2583static int
2584_base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, int sleep_flag)
2585{
2586 Mpi2IOCInitRequest_t mpi_request;
2587 Mpi2IOCInitReply_t mpi_reply;
2588 int r;
2589
2590 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
2591 __func__));
2592
2593 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
2594 mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
2595 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
2596 mpi_request.VF_ID = VF_ID;
2597 mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
2598 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
2599
2600 /* In MPI Revision I (0xA), the SystemReplyFrameSize(offset 0x18) was
2601 * removed and made reserved. For those with older firmware will need
2602 * this fix. It was decided that the Reply and Request frame sizes are
2603 * the same.
2604 */
2605 if ((ioc->facts.HeaderVersion >> 8) < 0xA) {
2606 mpi_request.Reserved7 = cpu_to_le16(ioc->reply_sz);
2607/* mpi_request.SystemReplyFrameSize =
2608 * cpu_to_le16(ioc->reply_sz);
2609 */
2610 }
2611
2612 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
2613 mpi_request.ReplyDescriptorPostQueueDepth =
2614 cpu_to_le16(ioc->reply_post_queue_depth);
2615 mpi_request.ReplyFreeQueueDepth =
2616 cpu_to_le16(ioc->reply_free_queue_depth);
2617
2618#if BITS_PER_LONG > 32
2619 mpi_request.SenseBufferAddressHigh =
2620 cpu_to_le32(ioc->sense_dma >> 32);
2621 mpi_request.SystemReplyAddressHigh =
2622 cpu_to_le32(ioc->reply_dma >> 32);
2623 mpi_request.SystemRequestFrameBaseAddress =
2624 cpu_to_le64(ioc->request_dma);
2625 mpi_request.ReplyFreeQueueAddress =
2626 cpu_to_le64(ioc->reply_free_dma);
2627 mpi_request.ReplyDescriptorPostQueueAddress =
2628 cpu_to_le64(ioc->reply_post_free_dma);
2629#else
2630 mpi_request.SystemRequestFrameBaseAddress =
2631 cpu_to_le32(ioc->request_dma);
2632 mpi_request.ReplyFreeQueueAddress =
2633 cpu_to_le32(ioc->reply_free_dma);
2634 mpi_request.ReplyDescriptorPostQueueAddress =
2635 cpu_to_le32(ioc->reply_post_free_dma);
2636#endif
2637
2638 if (ioc->logging_level & MPT_DEBUG_INIT) {
2639 u32 *mfp;
2640 int i;
2641
2642 mfp = (u32 *)&mpi_request;
2643 printk(KERN_DEBUG "\toffset:data\n");
2644 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
2645 printk(KERN_DEBUG "\t[0x%02x]:%08x\n", i*4,
2646 le32_to_cpu(mfp[i]));
2647 }
2648
2649 r = _base_handshake_req_reply_wait(ioc,
2650 sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
2651 sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
2652 sleep_flag);
2653
2654 if (r != 0) {
2655 printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
2656 ioc->name, __func__, r);
2657 return r;
2658 }
2659
2660 if (mpi_reply.IOCStatus != MPI2_IOCSTATUS_SUCCESS ||
2661 mpi_reply.IOCLogInfo) {
2662 printk(MPT2SAS_ERR_FMT "%s: failed\n", ioc->name, __func__);
2663 r = -EIO;
2664 }
2665
2666 return 0;
2667}
2668
2669/**
2670 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
2671 * @ioc: per adapter object
2672 * @VF_ID: virtual function id
2673 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2674 *
2675 * Returns 0 for success, non-zero for failure.
2676 */
2677static int
2678_base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, int sleep_flag)
2679{
2680 Mpi2PortEnableRequest_t *mpi_request;
2681 u32 ioc_state;
2682 unsigned long timeleft;
2683 int r = 0;
2684 u16 smid;
2685
2686 printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
2687
2688 if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
2689 printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
2690 ioc->name, __func__);
2691 return -EAGAIN;
2692 }
2693
2694 smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
2695 if (!smid) {
2696 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
2697 ioc->name, __func__);
2698 return -EAGAIN;
2699 }
2700
2701 ioc->base_cmds.status = MPT2_CMD_PENDING;
2702 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
2703 ioc->base_cmds.smid = smid;
2704 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
2705 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
2706 mpi_request->VF_ID = VF_ID;
2707
2708 mpt2sas_base_put_smid_default(ioc, smid, VF_ID);
2709 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
2710 300*HZ);
2711 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
2712 printk(MPT2SAS_ERR_FMT "%s: timeout\n",
2713 ioc->name, __func__);
2714 _debug_dump_mf(mpi_request,
2715 sizeof(Mpi2PortEnableRequest_t)/4);
2716 if (ioc->base_cmds.status & MPT2_CMD_RESET)
2717 r = -EFAULT;
2718 else
2719 r = -ETIME;
2720 goto out;
2721 } else
2722 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: complete\n",
2723 ioc->name, __func__));
2724
2725 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_OPERATIONAL,
2726 60, sleep_flag);
2727 if (ioc_state) {
2728 printk(MPT2SAS_ERR_FMT "%s: failed going to operational state "
2729 " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
2730 r = -EFAULT;
2731 }
2732 out:
2733 ioc->base_cmds.status = MPT2_CMD_NOT_USED;
2734 printk(MPT2SAS_INFO_FMT "port enable: %s\n",
2735 ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
2736 return r;
2737}
2738
2739/**
2740 * _base_unmask_events - turn on notification for this event
2741 * @ioc: per adapter object
2742 * @event: firmware event
2743 *
2744 * The mask is stored in ioc->event_masks.
2745 */
2746static void
2747_base_unmask_events(struct MPT2SAS_ADAPTER *ioc, u16 event)
2748{
2749 u32 desired_event;
2750
2751 if (event >= 128)
2752 return;
2753
2754 desired_event = (1 << (event % 32));
2755
2756 if (event < 32)
2757 ioc->event_masks[0] &= ~desired_event;
2758 else if (event < 64)
2759 ioc->event_masks[1] &= ~desired_event;
2760 else if (event < 96)
2761 ioc->event_masks[2] &= ~desired_event;
2762 else if (event < 128)
2763 ioc->event_masks[3] &= ~desired_event;
2764}
2765
2766/**
2767 * _base_event_notification - send event notification
2768 * @ioc: per adapter object
2769 * @VF_ID: virtual function id
2770 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2771 *
2772 * Returns 0 for success, non-zero for failure.
2773 */
2774static int
2775_base_event_notification(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, int sleep_flag)
2776{
2777 Mpi2EventNotificationRequest_t *mpi_request;
2778 unsigned long timeleft;
2779 u16 smid;
2780 int r = 0;
2781 int i;
2782
2783 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
2784 __func__));
2785
2786 if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
2787 printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
2788 ioc->name, __func__);
2789 return -EAGAIN;
2790 }
2791
2792 smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
2793 if (!smid) {
2794 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
2795 ioc->name, __func__);
2796 return -EAGAIN;
2797 }
2798 ioc->base_cmds.status = MPT2_CMD_PENDING;
2799 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
2800 ioc->base_cmds.smid = smid;
2801 memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
2802 mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
2803 mpi_request->VF_ID = VF_ID;
2804 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2805 mpi_request->EventMasks[i] =
2806 le32_to_cpu(ioc->event_masks[i]);
2807 mpt2sas_base_put_smid_default(ioc, smid, VF_ID);
2808 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
2809 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
2810 printk(MPT2SAS_ERR_FMT "%s: timeout\n",
2811 ioc->name, __func__);
2812 _debug_dump_mf(mpi_request,
2813 sizeof(Mpi2EventNotificationRequest_t)/4);
2814 if (ioc->base_cmds.status & MPT2_CMD_RESET)
2815 r = -EFAULT;
2816 else
2817 r = -ETIME;
2818 } else
2819 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: complete\n",
2820 ioc->name, __func__));
2821 ioc->base_cmds.status = MPT2_CMD_NOT_USED;
2822 return r;
2823}
2824
2825/**
2826 * mpt2sas_base_validate_event_type - validating event types
2827 * @ioc: per adapter object
2828 * @event: firmware event
2829 *
2830 * This will turn on firmware event notification when application
2831 * ask for that event. We don't mask events that are already enabled.
2832 */
2833void
2834mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type)
2835{
2836 int i, j;
2837 u32 event_mask, desired_event;
2838 u8 send_update_to_fw;
2839
2840 for (i = 0, send_update_to_fw = 0; i <
2841 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
2842 event_mask = ~event_type[i];
2843 desired_event = 1;
2844 for (j = 0; j < 32; j++) {
2845 if (!(event_mask & desired_event) &&
2846 (ioc->event_masks[i] & desired_event)) {
2847 ioc->event_masks[i] &= ~desired_event;
2848 send_update_to_fw = 1;
2849 }
2850 desired_event = (desired_event << 1);
2851 }
2852 }
2853
2854 if (!send_update_to_fw)
2855 return;
2856
2857 mutex_lock(&ioc->base_cmds.mutex);
2858 _base_event_notification(ioc, 0, CAN_SLEEP);
2859 mutex_unlock(&ioc->base_cmds.mutex);
2860}
2861
2862/**
2863 * _base_diag_reset - the "big hammer" start of day reset
2864 * @ioc: per adapter object
2865 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2866 *
2867 * Returns 0 for success, non-zero for failure.
2868 */
2869static int
2870_base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2871{
2872 u32 host_diagnostic;
2873 u32 ioc_state;
2874 u32 count;
2875 u32 hcb_size;
2876
2877 printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
2878
2879 _base_save_msix_table(ioc);
2880
2881 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "clear interrupts\n",
2882 ioc->name));
2883 writel(0, &ioc->chip->HostInterruptStatus);
2884
2885 count = 0;
2886 do {
2887 /* Write magic sequence to WriteSequence register
2888 * Loop until in diagnostic mode
2889 */
2890 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "write magic "
2891 "sequence\n", ioc->name));
2892 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
2893 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
2894 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
2895 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
2896 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
2897 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
2898 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
2899
2900 /* wait 100 msec */
2901 if (sleep_flag == CAN_SLEEP)
2902 msleep(100);
2903 else
2904 mdelay(100);
2905
2906 if (count++ > 20)
2907 goto out;
2908
2909 host_diagnostic = readl(&ioc->chip->HostDiagnostic);
2910 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "wrote magic "
2911 "sequence: count(%d), host_diagnostic(0x%08x)\n",
2912 ioc->name, count, host_diagnostic));
2913
2914 } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
2915
2916 hcb_size = readl(&ioc->chip->HCBSize);
2917
2918 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "diag reset: issued\n",
2919 ioc->name));
2920 writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
2921 &ioc->chip->HostDiagnostic);
2922
2923 /* don't access any registers for 50 milliseconds */
2924 msleep(50);
2925
2926 /* 300 second max wait */
2927 for (count = 0; count < 3000000 ; count++) {
2928
2929 host_diagnostic = readl(&ioc->chip->HostDiagnostic);
2930
2931 if (host_diagnostic == 0xFFFFFFFF)
2932 goto out;
2933 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
2934 break;
2935
2936 /* wait 100 msec */
2937 if (sleep_flag == CAN_SLEEP)
2938 msleep(1);
2939 else
2940 mdelay(1);
2941 }
2942
2943 if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
2944
2945 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "restart the adapter "
2946 "assuming the HCB Address points to good F/W\n",
2947 ioc->name));
2948 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
2949 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
2950 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
2951
2952 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT
2953 "re-enable the HCDW\n", ioc->name));
2954 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
2955 &ioc->chip->HCBSize);
2956 }
2957
2958 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "restart the adapter\n",
2959 ioc->name));
2960 writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
2961 &ioc->chip->HostDiagnostic);
2962
2963 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "disable writes to the "
2964 "diagnostic register\n", ioc->name));
2965 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
2966
2967 drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "Wait for FW to go to the "
2968 "READY state\n", ioc->name));
2969 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
2970 sleep_flag);
2971 if (ioc_state) {
2972 printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
2973 " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
2974 goto out;
2975 }
2976
2977 _base_restore_msix_table(ioc);
2978 printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
2979 return 0;
2980
2981 out:
2982 printk(MPT2SAS_ERR_FMT "diag reset: FAILED\n", ioc->name);
2983 return -EFAULT;
2984}
2985
2986/**
2987 * _base_make_ioc_ready - put controller in READY state
2988 * @ioc: per adapter object
2989 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2990 * @type: FORCE_BIG_HAMMER or SOFT_RESET
2991 *
2992 * Returns 0 for success, non-zero for failure.
2993 */
2994static int
2995_base_make_ioc_ready(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
2996 enum reset_type type)
2997{
2998 u32 ioc_state;
2999
3000 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
3001 __func__));
3002
3003 ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
3004 dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: ioc_state(0x%08x)\n",
3005 ioc->name, __func__, ioc_state));
3006
3007 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
3008 return 0;
3009
3010 if (ioc_state & MPI2_DOORBELL_USED) {
3011 dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "unexpected doorbell "
3012 "active!\n", ioc->name));
3013 goto issue_diag_reset;
3014 }
3015
3016 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3017 mpt2sas_base_fault_info(ioc, ioc_state &
3018 MPI2_DOORBELL_DATA_MASK);
3019 goto issue_diag_reset;
3020 }
3021
3022 if (type == FORCE_BIG_HAMMER)
3023 goto issue_diag_reset;
3024
3025 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
3026 if (!(_base_send_ioc_reset(ioc,
3027 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP)))
3028 return 0;
3029
3030 issue_diag_reset:
3031 return _base_diag_reset(ioc, CAN_SLEEP);
3032}
3033
3034/**
3035 * _base_make_ioc_operational - put controller in OPERATIONAL state
3036 * @ioc: per adapter object
3037 * @VF_ID: virtual function id
3038 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3039 *
3040 * Returns 0 for success, non-zero for failure.
3041 */
3042static int
3043_base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
3044 int sleep_flag)
3045{
3046 int r, i;
3047 unsigned long flags;
3048 u32 reply_address;
3049
3050 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
3051 __func__));
3052
3053 /* initialize the scsi lookup free list */
3054 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3055 INIT_LIST_HEAD(&ioc->free_list);
3056 for (i = 0; i < ioc->request_depth; i++) {
3057 ioc->scsi_lookup[i].cb_idx = 0xFF;
3058 list_add_tail(&ioc->scsi_lookup[i].tracker_list,
3059 &ioc->free_list);
3060 }
3061 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3062
3063 /* initialize Reply Free Queue */
3064 for (i = 0, reply_address = (u32)ioc->reply_dma ;
3065 i < ioc->reply_free_queue_depth ; i++, reply_address +=
3066 ioc->reply_sz)
3067 ioc->reply_free[i] = cpu_to_le32(reply_address);
3068
3069 /* initialize Reply Post Free Queue */
3070 for (i = 0; i < ioc->reply_post_queue_depth; i++)
3071 ioc->reply_post_free[i].Words = ~0ULL;
3072
3073 r = _base_send_ioc_init(ioc, VF_ID, sleep_flag);
3074 if (r)
3075 return r;
3076
3077 /* initialize the index's */
3078 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
3079 ioc->reply_post_host_index = 0;
3080 writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
3081 writel(0, &ioc->chip->ReplyPostHostIndex);
3082
3083 _base_unmask_interrupts(ioc);
3084 r = _base_event_notification(ioc, VF_ID, sleep_flag);
3085 if (r)
3086 return r;
3087
3088 if (sleep_flag == CAN_SLEEP)
3089 _base_static_config_pages(ioc);
3090
3091 r = _base_send_port_enable(ioc, VF_ID, sleep_flag);
3092 if (r)
3093 return r;
3094
3095 return r;
3096}
3097
3098/**
3099 * mpt2sas_base_free_resources - free resources controller resources (io/irq/memap)
3100 * @ioc: per adapter object
3101 *
3102 * Return nothing.
3103 */
3104void
3105mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
3106{
3107 struct pci_dev *pdev = ioc->pdev;
3108
3109 dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
3110 __func__));
3111
3112 _base_mask_interrupts(ioc);
3113 _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
3114 if (ioc->pci_irq) {
3115 synchronize_irq(pdev->irq);
3116 free_irq(ioc->pci_irq, ioc);
3117 }
3118 _base_disable_msix(ioc);
3119 if (ioc->chip_phys)
3120 iounmap(ioc->chip);
3121 ioc->pci_irq = -1;
3122 ioc->chip_phys = 0;
3123 pci_release_selected_regions(ioc->pdev, ioc->bars);
3124 pci_disable_device(pdev);
3125 pci_set_drvdata(pdev, NULL);
3126 return;
3127}
3128
3129/**
3130 * mpt2sas_base_attach - attach controller instance
3131 * @ioc: per adapter object
3132 *
3133 * Returns 0 for success, non-zero for failure.
3134 */
3135int
3136mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3137{
3138 int r, i;
3139 unsigned long flags;
3140
3141 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
3142 __func__));
3143
3144 r = mpt2sas_base_map_resources(ioc);
3145 if (r)
3146 return r;
3147
3148 r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
3149 if (r)
3150 goto out_free_resources;
3151
3152 r = _base_get_ioc_facts(ioc, CAN_SLEEP);
3153 if (r)
3154 goto out_free_resources;
3155
3156 r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
3157 if (r)
3158 goto out_free_resources;
3159
3160 init_waitqueue_head(&ioc->reset_wq);
3161
3162 /* base internal command bits */
3163 mutex_init(&ioc->base_cmds.mutex);
3164 init_completion(&ioc->base_cmds.done);
3165 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3166 ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3167
3168 /* transport internal command bits */
3169 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3170 ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
3171 mutex_init(&ioc->transport_cmds.mutex);
3172 init_completion(&ioc->transport_cmds.done);
3173
3174 /* task management internal command bits */
3175 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3176 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
3177 mutex_init(&ioc->tm_cmds.mutex);
3178 init_completion(&ioc->tm_cmds.done);
3179
3180 /* config page internal command bits */
3181 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3182 ioc->config_cmds.status = MPT2_CMD_NOT_USED;
3183 mutex_init(&ioc->config_cmds.mutex);
3184 init_completion(&ioc->config_cmds.done);
3185
3186 /* ctl module internal command bits */
3187 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3188 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
3189 mutex_init(&ioc->ctl_cmds.mutex);
3190 init_completion(&ioc->ctl_cmds.done);
3191
3192 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3193 ioc->event_masks[i] = -1;
3194
3195 /* here we enable the events we care about */
3196 _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
3197 _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
3198 _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
3199 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
3200 _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
3201 _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
3202 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
3203 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
3204 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
3205 _base_unmask_events(ioc, MPI2_EVENT_TASK_SET_FULL);
3206 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
3207
3208 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
3209 sizeof(Mpi2PortFactsReply_t), GFP_KERNEL);
3210 if (!ioc->pfacts)
3211 goto out_free_resources;
3212
3213 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
3214 r = _base_get_port_facts(ioc, i, CAN_SLEEP);
3215 if (r)
3216 goto out_free_resources;
3217 }
3218 r = _base_make_ioc_operational(ioc, 0, CAN_SLEEP);
3219 if (r)
3220 goto out_free_resources;
3221
3222 /* initialize fault polling */
3223 INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
3224 snprintf(ioc->fault_reset_work_q_name,
3225 sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
3226 ioc->fault_reset_work_q =
3227 create_singlethread_workqueue(ioc->fault_reset_work_q_name);
3228 if (!ioc->fault_reset_work_q) {
3229 printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
3230 ioc->name, __func__, __LINE__);
3231 goto out_free_resources;
3232 }
3233 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
3234 if (ioc->fault_reset_work_q)
3235 queue_delayed_work(ioc->fault_reset_work_q,
3236 &ioc->fault_reset_work,
3237 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
3238 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
3239 return 0;
3240
3241 out_free_resources:
3242
3243 ioc->remove_host = 1;
3244 mpt2sas_base_free_resources(ioc);
3245 _base_release_memory_pools(ioc);
3246 kfree(ioc->tm_cmds.reply);
3247 kfree(ioc->transport_cmds.reply);
3248 kfree(ioc->config_cmds.reply);
3249 kfree(ioc->base_cmds.reply);
3250 kfree(ioc->ctl_cmds.reply);
3251 kfree(ioc->pfacts);
3252 ioc->ctl_cmds.reply = NULL;
3253 ioc->base_cmds.reply = NULL;
3254 ioc->tm_cmds.reply = NULL;
3255 ioc->transport_cmds.reply = NULL;
3256 ioc->config_cmds.reply = NULL;
3257 ioc->pfacts = NULL;
3258 return r;
3259}
3260
3261
3262/**
3263 * mpt2sas_base_detach - remove controller instance
3264 * @ioc: per adapter object
3265 *
3266 * Return nothing.
3267 */
3268void
3269mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
3270{
3271 unsigned long flags;
3272 struct workqueue_struct *wq;
3273
3274 dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
3275 __func__));
3276
3277 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
3278 wq = ioc->fault_reset_work_q;
3279 ioc->fault_reset_work_q = NULL;
3280 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
3281 if (!cancel_delayed_work(&ioc->fault_reset_work))
3282 flush_workqueue(wq);
3283 destroy_workqueue(wq);
3284
3285 mpt2sas_base_free_resources(ioc);
3286 _base_release_memory_pools(ioc);
3287 kfree(ioc->pfacts);
3288 kfree(ioc->ctl_cmds.reply);
3289 kfree(ioc->base_cmds.reply);
3290 kfree(ioc->tm_cmds.reply);
3291 kfree(ioc->transport_cmds.reply);
3292 kfree(ioc->config_cmds.reply);
3293}
3294
3295/**
3296 * _base_reset_handler - reset callback handler (for base)
3297 * @ioc: per adapter object
3298 * @reset_phase: phase
3299 *
3300 * The handler for doing any required cleanup or initialization.
3301 *
3302 * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
3303 * MPT2_IOC_DONE_RESET
3304 *
3305 * Return nothing.
3306 */
3307static void
3308_base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
3309{
3310 switch (reset_phase) {
3311 case MPT2_IOC_PRE_RESET:
3312 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
3313 "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
3314 break;
3315 case MPT2_IOC_AFTER_RESET:
3316 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
3317 "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
3318 if (ioc->transport_cmds.status & MPT2_CMD_PENDING) {
3319 ioc->transport_cmds.status |= MPT2_CMD_RESET;
3320 mpt2sas_base_free_smid(ioc, ioc->transport_cmds.smid);
3321 complete(&ioc->transport_cmds.done);
3322 }
3323 if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
3324 ioc->base_cmds.status |= MPT2_CMD_RESET;
3325 mpt2sas_base_free_smid(ioc, ioc->base_cmds.smid);
3326 complete(&ioc->base_cmds.done);
3327 }
3328 if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
3329 ioc->config_cmds.status |= MPT2_CMD_RESET;
3330 mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
3331 complete(&ioc->config_cmds.done);
3332 }
3333 break;
3334 case MPT2_IOC_DONE_RESET:
3335 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
3336 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
3337 break;
3338 }
3339 mpt2sas_scsih_reset_handler(ioc, reset_phase);
3340 mpt2sas_ctl_reset_handler(ioc, reset_phase);
3341}
3342
3343/**
3344 * _wait_for_commands_to_complete - reset controller
3345 * @ioc: Pointer to MPT_ADAPTER structure
3346 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3347 *
3348 * This function waiting(3s) for all pending commands to complete
3349 * prior to putting controller in reset.
3350 */
3351static void
3352_wait_for_commands_to_complete(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3353{
3354 u32 ioc_state;
3355 unsigned long flags;
3356 u16 i;
3357
3358 ioc->pending_io_count = 0;
3359 if (sleep_flag != CAN_SLEEP)
3360 return;
3361
3362 ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
3363 if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
3364 return;
3365
3366 /* pending command count */
3367 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3368 for (i = 0; i < ioc->request_depth; i++)
3369 if (ioc->scsi_lookup[i].cb_idx != 0xFF)
3370 ioc->pending_io_count++;
3371 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3372
3373 if (!ioc->pending_io_count)
3374 return;
3375
3376 /* wait for pending commands to complete */
3377 wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 3 * HZ);
3378}
3379
3380/**
3381 * mpt2sas_base_hard_reset_handler - reset controller
3382 * @ioc: Pointer to MPT_ADAPTER structure
3383 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3384 * @type: FORCE_BIG_HAMMER or SOFT_RESET
3385 *
3386 * Returns 0 for success, non-zero for failure.
3387 */
3388int
3389mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
3390 enum reset_type type)
3391{
3392 int r, i;
3393 unsigned long flags;
3394
3395 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
3396 __func__));
3397
3398 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
3399 if (ioc->ioc_reset_in_progress) {
3400 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
3401 printk(MPT2SAS_ERR_FMT "%s: busy\n",
3402 ioc->name, __func__);
3403 return -EBUSY;
3404 }
3405 ioc->ioc_reset_in_progress = 1;
3406 ioc->shost_recovery = 1;
3407 if (ioc->shost->shost_state == SHOST_RUNNING) {
3408 /* set back to SHOST_RUNNING in mpt2sas_scsih.c */
3409 scsi_host_set_state(ioc->shost, SHOST_RECOVERY);
3410 printk(MPT2SAS_INFO_FMT "putting controller into "
3411 "SHOST_RECOVERY\n", ioc->name);
3412 }
3413 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
3414
3415 _base_reset_handler(ioc, MPT2_IOC_PRE_RESET);
3416 _wait_for_commands_to_complete(ioc, sleep_flag);
3417 _base_mask_interrupts(ioc);
3418 r = _base_make_ioc_ready(ioc, sleep_flag, type);
3419 if (r)
3420 goto out;
3421 _base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
3422 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++)
3423 r = _base_make_ioc_operational(ioc, ioc->pfacts[i].VF_ID,
3424 sleep_flag);
3425 if (!r)
3426 _base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
3427 out:
3428 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: %s\n",
3429 ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
3430
3431 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
3432 ioc->ioc_reset_in_progress = 0;
3433 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
3434 return r;
3435}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
new file mode 100644
index 000000000000..6945ff4d382e
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -0,0 +1,779 @@
1/*
2 * This is the Fusion MPT base driver providing common API layer interface
3 * for access to MPT (Message Passing Technology) firmware.
4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.h
6 * Copyright (C) 2007-2008 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
42 * USA.
43 */
44
45#ifndef MPT2SAS_BASE_H_INCLUDED
46#define MPT2SAS_BASE_H_INCLUDED
47
48#include "mpi/mpi2_type.h"
49#include "mpi/mpi2.h"
50#include "mpi/mpi2_ioc.h"
51#include "mpi/mpi2_cnfg.h"
52#include "mpi/mpi2_init.h"
53#include "mpi/mpi2_raid.h"
54#include "mpi/mpi2_tool.h"
55#include "mpi/mpi2_sas.h"
56
57#include <scsi/scsi.h>
58#include <scsi/scsi_cmnd.h>
59#include <scsi/scsi_device.h>
60#include <scsi/scsi_host.h>
61#include <scsi/scsi_tcq.h>
62#include <scsi/scsi_transport_sas.h>
63#include <scsi/scsi_dbg.h>
64
65#include "mpt2sas_debug.h"
66
67/* driver versioning info */
68#define MPT2SAS_DRIVER_NAME "mpt2sas"
69#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
70#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
71#define MPT2SAS_DRIVER_VERSION "00.100.11.16"
72#define MPT2SAS_MAJOR_VERSION 00
73#define MPT2SAS_MINOR_VERSION 100
74#define MPT2SAS_BUILD_VERSION 11
75#define MPT2SAS_RELEASE_VERSION 16
76
77/*
78 * Set MPT2SAS_SG_DEPTH value based on user input.
79 */
80#ifdef CONFIG_SCSI_MPT2SAS_MAX_SGE
81#if CONFIG_SCSI_MPT2SAS_MAX_SGE < 16
82#define MPT2SAS_SG_DEPTH 16
83#elif CONFIG_SCSI_MPT2SAS_MAX_SGE > 128
84#define MPT2SAS_SG_DEPTH 128
85#else
86#define MPT2SAS_SG_DEPTH CONFIG_SCSI_MPT2SAS_MAX_SGE
87#endif
88#else
89#define MPT2SAS_SG_DEPTH 128 /* MAX_HW_SEGMENTS */
90#endif
91
92
93/*
94 * Generic Defines
95 */
96#define MPT2SAS_SATA_QUEUE_DEPTH 32
97#define MPT2SAS_SAS_QUEUE_DEPTH 254
98#define MPT2SAS_RAID_QUEUE_DEPTH 128
99
100#define MPT_NAME_LENGTH 32 /* generic length of strings */
101#define MPT_STRING_LENGTH 64
102
103#define MPT_MAX_CALLBACKS 16
104
105#define CAN_SLEEP 1
106#define NO_SLEEP 0
107
108#define INTERNAL_CMDS_COUNT 10 /* reserved cmds */
109
110#define MPI2_HIM_MASK 0xFFFFFFFF /* mask every bit*/
111
112#define MPT2SAS_INVALID_DEVICE_HANDLE 0xFFFF
113
114
115/*
116 * reset phases
117 */
118#define MPT2_IOC_PRE_RESET 1 /* prior to host reset */
119#define MPT2_IOC_AFTER_RESET 2 /* just after host reset */
120#define MPT2_IOC_DONE_RESET 3 /* links re-initialized */
121
122/*
123 * logging format
124 */
125#define MPT2SAS_FMT "%s: "
126#define MPT2SAS_DEBUG_FMT KERN_DEBUG MPT2SAS_FMT
127#define MPT2SAS_INFO_FMT KERN_INFO MPT2SAS_FMT
128#define MPT2SAS_NOTE_FMT KERN_NOTICE MPT2SAS_FMT
129#define MPT2SAS_WARN_FMT KERN_WARNING MPT2SAS_FMT
130#define MPT2SAS_ERR_FMT KERN_ERR MPT2SAS_FMT
131
132/*
133 * per target private data
134 */
135#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01
136#define MPT_TARGET_FLAGS_VOLUME 0x02
137#define MPT_TARGET_FLAGS_DELETED 0x04
138
/**
 * struct MPT2SAS_TARGET - starget private hostdata
 * @starget: starget object
 * @sas_address: target sas address
 * @handle: device handle
 * @num_luns: number of luns
 * @flags: MPT_TARGET_FLAGS_XXX flags
 * @deleted: target flagged for deletion
 * @tm_busy: target is busy with TM request.
 */
struct MPT2SAS_TARGET {
	struct scsi_target *starget;
	u64 sas_address;
	u16 handle;
	int num_luns;
	u32 flags;
	u8 deleted;
	u8 tm_busy;
};
158
159/*
160 * per device private data
161 */
162#define MPT_DEVICE_FLAGS_INIT 0x01
163#define MPT_DEVICE_TLR_ON 0x02
164
/**
 * struct MPT2SAS_DEVICE - sdev private hostdata
 * @sas_target: starget private hostdata
 * @lun: lun number
 * @flags: MPT_DEVICE_XXX flags
 * @configured_lun: lun is configured
 * @block: device is in SDEV_BLOCK state
 * @tlr_snoop_check: flag used in determining whether to disable TLR
 */
struct MPT2SAS_DEVICE {
	struct MPT2SAS_TARGET *sas_target;
	unsigned int lun;
	u32 flags;
	u8 configured_lun;
	u8 block;
	u8 tlr_snoop_check;
};
182
183#define MPT2_CMD_NOT_USED 0x8000 /* free */
184#define MPT2_CMD_COMPLETE 0x0001 /* completed */
185#define MPT2_CMD_PENDING 0x0002 /* pending */
186#define MPT2_CMD_REPLY_VALID 0x0004 /* reply is valid */
187#define MPT2_CMD_RESET 0x0008 /* host reset dropped the command */
188
/**
 * struct _internal_cmd - internal commands struct
 * @mutex: mutex serializing use of this command slot
 * @done: completion signalled when the reply arrives
 * @reply: reply message pointer
 * @status: MPT2_CMD_XXX status
 * @smid: system message id
 */
struct _internal_cmd {
	struct mutex mutex;
	struct completion done;
	void *reply;
	u16 status;
	u16 smid;
};
204
205/*
206 * SAS Topology Structures
207 */
208
/**
 * struct _sas_device - attached device information
 * @list: sas device list
 * @starget: starget object
 * @sas_address: device sas address
 * @device_name: retrieved from the SAS IDENTIFY frame.
 * @handle: device handle
 * @parent_handle: handle to parent device
 * @enclosure_handle: enclosure handle
 * @enclosure_logical_id: enclosure logical identifier
 * @volume_handle: volume handle (valid when hidden raid member)
 * @volume_wwid: volume unique identifier
 * @device_info: bitfield provides detailed info about the device
 * @id: target id
 * @channel: target channel
 * @slot: slot number
 * @hidden_raid_component: set to 1 when this is a raid member
 * @responding: used in _scsih_sas_device_mark_responding
 */
struct _sas_device {
	struct list_head list;
	struct scsi_target *starget;
	u64 sas_address;
	u64 device_name;
	u16 handle;
	u16 parent_handle;
	u16 enclosure_handle;
	u64 enclosure_logical_id;
	u16 volume_handle;
	u64 volume_wwid;
	u32 device_info;
	int id;
	int channel;
	u16 slot;
	u8 hidden_raid_component;
	u8 responding;
};
246
/**
 * struct _raid_device - raid volume link list
 * @list: sas device list
 * @starget: starget object
 * @sdev: scsi device struct (volumes are single lun)
 * @wwid: unique identifier for the volume
 * @handle: device handle
 * @id: target id
 * @channel: target channel
 * @volume_type: the raid level
 * @device_info: bitfield provides detailed info about the hidden components
 * @num_pds: number of hidden raid components
 * @responding: used in _scsih_raid_device_mark_responding
 */
struct _raid_device {
	struct list_head list;
	struct scsi_target *starget;
	struct scsi_device *sdev;
	u64 wwid;
	u16 handle;
	int id;
	int channel;
	u8 volume_type;
	u32 device_info;
	u8 num_pds;
	u8 responding;
};
274
/**
 * struct _boot_device - boot device info
 * @is_raid: flag to indicate whether this is a volume
 * @device: holds pointer for either struct _sas_device or
 *	struct _raid_device
 */
struct _boot_device {
	u8 is_raid;
	void *device;
};
285
/**
 * struct _sas_port - wide/narrow sas port information
 * @port_list: list of ports belonging to expander
 * @handle: device handle for this port
 * @sas_address: sas address of this port
 * @num_phys: number of phys belonging to this port
 * @remote_identify: attached device identification
 * @rphy: sas transport rphy object
 * @port: sas transport wide/narrow port object
 * @phy_list: _sas_phy list objects belonging to this port
 */
struct _sas_port {
	struct list_head port_list;
	u16 handle;
	u64 sas_address;
	u8 num_phys;
	struct sas_identify remote_identify;
	struct sas_rphy *rphy;
	struct sas_port *port;
	struct list_head phy_list;
};
307
/**
 * struct _sas_phy - phy information
 * @port_siblings: list of phys belonging to a port
 * @identify: phy identification
 * @remote_identify: attached device identification
 * @phy: sas transport phy object
 * @phy_id: unique phy id
 * @handle: device handle for this phy
 * @attached_handle: device handle for attached device
 */
struct _sas_phy {
	struct list_head port_siblings;
	struct sas_identify identify;
	struct sas_identify remote_identify;
	struct sas_phy *phy;
	u8 phy_id;
	u16 handle;
	u16 attached_handle;
};
327
/**
 * struct _sas_node - sas_host/expander information
 * @list: list of expanders
 * @parent_dev: parent device class
 * @num_phys: number phys belonging to this sas_host/expander
 * @sas_address: sas address of this sas_host/expander
 * @handle: handle for this sas_host/expander
 * @parent_handle: parent handle
 * @enclosure_handle: handle for this a member of an enclosure
 * @enclosure_logical_id: enclosure logical identifier
 * @device_info: bitwise defining capabilities of this sas_host/expander
 * @responding: used in _scsih_expander_device_mark_responding
 * @phy: a list of phys that make up this sas_host/expander
 * @sas_port_list: list of ports attached to this sas_host/expander
 */
struct _sas_node {
	struct list_head list;
	struct device *parent_dev;
	u8 num_phys;
	u64 sas_address;
	u16 handle;
	u16 parent_handle;
	u16 enclosure_handle;
	u64 enclosure_logical_id;
	u8 responding;
	struct _sas_phy *phy;
	struct list_head sas_port_list;
};
355
/**
 * enum reset_type - reset state
 * @FORCE_BIG_HAMMER: issue diagnostic reset
 * @SOFT_RESET: issue message_unit_reset; if it fails, fall back to the
 *	big hammer (diagnostic reset)
 */
enum reset_type {
	FORCE_BIG_HAMMER,
	SOFT_RESET,
};
365
/**
 * struct request_tracker - firmware request tracker
 * @smid: system message id
 * @scmd: scsi request pointer
 * @cb_idx: callback index (0xFF when the slot is free)
 * @chain_list: list of chains associated to this IO
 * @tracker_list: list of free request (ioc->free_list)
 */
struct request_tracker {
	u16 smid;
	struct scsi_cmnd *scmd;
	u8 cb_idx;
	struct list_head tracker_list;
};
380
/* SGE builder callback: writes an SGE (32- or 64-bit form, per
 * base_add_sg_single) with the given flags/length at @paddr */
typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
382
383/**
384 * struct MPT2SAS_ADAPTER - per adapter struct
385 * @list: ioc_list
386 * @shost: shost object
387 * @id: unique adapter id
388 * @pci_irq: irq number
389 * @name: generic ioc string
390 * @tmp_string: tmp string used for logging
391 * @pdev: pci pdev object
392 * @chip: memory mapped register space
393 * @chip_phys: physical addrss prior to mapping
394 * @pio_chip: I/O mapped register space
395 * @logging_level: see mpt2sas_debug.h
396 * @ir_firmware: IR firmware present
397 * @bars: bitmask of BAR's that must be configured
398 * @mask_interrupts: ignore interrupt
399 * @fault_reset_work_q_name: fw fault work queue
400 * @fault_reset_work_q: ""
401 * @fault_reset_work: ""
402 * @firmware_event_name: fw event work queue
403 * @firmware_event_thread: ""
404 * @fw_events_off: flag to turn off fw event handling
405 * @fw_event_lock:
406 * @fw_event_list: list of fw events
407 * @aen_event_read_flag: event log was read
408 * @broadcast_aen_busy: broadcast aen waiting to be serviced
409 * @ioc_reset_in_progress: host reset in progress
410 * @ioc_reset_in_progress_lock:
411 * @ioc_link_reset_in_progress: phy/hard reset in progress
412 * @ignore_loginfos: ignore loginfos during task managment
413 * @remove_host: flag for when driver unloads, to avoid sending dev resets
414 * @wait_for_port_enable_to_complete:
415 * @msix_enable: flag indicating msix is enabled
416 * @msix_vector_count: number msix vectors
417 * @msix_table: virt address to the msix table
418 * @msix_table_backup: backup msix table
419 * @scsi_io_cb_idx: shost generated commands
420 * @tm_cb_idx: task management commands
421 * @transport_cb_idx: transport internal commands
422 * @ctl_cb_idx: clt internal commands
423 * @base_cb_idx: base internal commands
424 * @config_cb_idx: base internal commands
425 * @base_cmds:
426 * @transport_cmds:
427 * @tm_cmds:
428 * @ctl_cmds:
429 * @config_cmds:
430 * @base_add_sg_single: handler for either 32/64 bit sgl's
431 * @event_type: bits indicating which events to log
432 * @event_context: unique id for each logged event
433 * @event_log: event log pointer
434 * @event_masks: events that are masked
435 * @facts: static facts data
436 * @pfacts: static port facts data
437 * @manu_pg0: static manufacturing page 0
438 * @bios_pg2: static bios page 2
439 * @bios_pg3: static bios page 3
440 * @ioc_pg8: static ioc page 8
441 * @iounit_pg0: static iounit page 0
442 * @iounit_pg1: static iounit page 1
443 * @sas_hba: sas host object
444 * @sas_expander_list: expander object list
445 * @sas_node_lock:
446 * @sas_device_list: sas device object list
447 * @sas_device_init_list: sas device object list (used only at init time)
448 * @sas_device_lock:
449 * @io_missing_delay: time for IO completed by fw when PDR enabled
450 * @device_missing_delay: time for device missing by fw when PDR enabled
451 * @config_page_sz: config page size
452 * @config_page: reserve memory for config page payload
453 * @config_page_dma:
454 * @sge_size: sg element size for either 32/64 bit
455 * @request_depth: hba request queue depth
456 * @request_sz: per request frame size
457 * @request: pool of request frames
458 * @request_dma:
459 * @request_dma_sz:
460 * @scsi_lookup: firmware request tracker list
461 * @scsi_lookup_lock:
462 * @free_list: free list of request
463 * @chain: pool of chains
464 * @pending_io_count:
465 * @reset_wq:
466 * @chain_dma:
467 * @max_sges_in_main_message: number sg elements in main message
468 * @max_sges_in_chain_message: number sg elements per chain
469 * @chains_needed_per_io: max chains per io
470 * @chain_offset_value_for_main_message: location 1st sg in main
471 * @chain_depth: total chains allocated
472 * @sense: pool of sense
473 * @sense_dma:
474 * @sense_dma_pool:
475 * @reply_depth: hba reply queue depth:
476 * @reply_sz: per reply frame size:
477 * @reply: pool of replys:
478 * @reply_dma:
479 * @reply_dma_pool:
480 * @reply_free_queue_depth: reply free depth
481 * @reply_free: pool for reply free queue (32 bit addr)
482 * @reply_free_dma:
483 * @reply_free_dma_pool:
484 * @reply_free_host_index: tail index in pool to insert free replys
485 * @reply_post_queue_depth: reply post queue depth
486 * @reply_post_free: pool for reply post (64bit descriptor)
487 * @reply_post_free_dma:
488 * @reply_post_free_dma_pool:
489 * @reply_post_host_index: head index in the pool where FW completes IO
490 */
struct MPT2SAS_ADAPTER {
	struct list_head list;		/* link in global mpt2sas_ioc_list */
	struct Scsi_Host *shost;	/* scsi midlayer host object */
	u8 id;				/* per-adapter unique id */
	u32 pci_irq;
	char name[MPT_NAME_LENGTH];	/* generic ioc name for log messages */
	char tmp_string[MPT_STRING_LENGTH];
	struct pci_dev *pdev;
	Mpi2SystemInterfaceRegs_t __iomem *chip;	/* mapped register set */
	unsigned long chip_phys;
	unsigned long pio_chip;
	int logging_level;		/* bitmask gating debug printks */
	u8 ir_firmware;
	int bars;			/* PCI BARs claimed */
	u8 mask_interrupts;

	/* fw fault handler */
	char fault_reset_work_q_name[20];
	struct workqueue_struct *fault_reset_work_q;
	struct delayed_work fault_reset_work;

	/* fw event handler */
	char firmware_event_name[20];
	struct workqueue_struct *firmware_event_thread;
	u8 fw_events_off;
	spinlock_t fw_event_lock;	/* protects fw_event_list */
	struct list_head fw_event_list;

	/* misc flags */
	int aen_event_read_flag;
	u8 broadcast_aen_busy;
	u8 ioc_reset_in_progress;
	u8 shost_recovery;
	spinlock_t ioc_reset_in_progress_lock;
	u8 ioc_link_reset_in_progress;
	u8 ignore_loginfos;
	u8 remove_host;
	u8 wait_for_port_enable_to_complete;

	u8 msix_enable;
	u16 msix_vector_count;
	u32 *msix_table;
	u32 *msix_table_backup;

	/* internal commands, callback index */
	/* cb_idx values are handles obtained from
	 * mpt2sas_base_register_callback_handler() — one per submodule */
	u8 scsi_io_cb_idx;
	u8 tm_cb_idx;
	u8 transport_cb_idx;
	u8 ctl_cb_idx;
	u8 base_cb_idx;
	u8 config_cb_idx;
	struct _internal_cmd base_cmds;
	struct _internal_cmd transport_cmds;
	struct _internal_cmd tm_cmds;
	struct _internal_cmd ctl_cmds;
	struct _internal_cmd config_cmds;	/* used by mpt2sas_config.c */

	MPT_ADD_SGE base_add_sg_single;	/* hook to build a single sg element */

	/* event log */
	u32 event_type[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
	u32 event_context;
	void *event_log;
	u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];

	/* static config pages */
	Mpi2IOCFactsReply_t facts;
	Mpi2PortFactsReply_t *pfacts;
	Mpi2ManufacturingPage0_t manu_pg0;
	Mpi2BiosPage2_t bios_pg2;
	Mpi2BiosPage3_t bios_pg3;
	Mpi2IOCPage8_t ioc_pg8;
	Mpi2IOUnitPage0_t iounit_pg0;
	Mpi2IOUnitPage1_t iounit_pg1;

	struct _boot_device req_boot_device;
	struct _boot_device req_alt_boot_device;
	struct _boot_device current_boot_device;

	/* sas hba, expander, and device list */
	struct _sas_node sas_hba;
	struct list_head sas_expander_list;
	spinlock_t sas_node_lock;	/* protects the expander list */
	struct list_head sas_device_list;
	struct list_head sas_device_init_list;
	spinlock_t sas_device_lock;	/* protects the two device lists */
	struct list_head raid_device_list;
	spinlock_t raid_device_lock;	/* protects raid_device_list */
	u8 io_missing_delay;
	u16 device_missing_delay;
	int sas_id;

	/* config page */
	/* pre-allocated DMA buffer reused by most config page requests;
	 * larger pages fall back to a per-request allocation */
	u16 config_page_sz;
	void *config_page;
	dma_addr_t config_page_dma;

	/* request */
	u16 sge_size;
	u16 request_depth;
	u16 request_sz;
	u8 *request;
	dma_addr_t request_dma;
	u32 request_dma_sz;
	struct request_tracker *scsi_lookup;
	spinlock_t scsi_lookup_lock;
	struct list_head free_list;
	int pending_io_count;
	wait_queue_head_t reset_wq;

	/* chain */
	u8 *chain;
	dma_addr_t chain_dma;
	u16 max_sges_in_main_message;
	u16 max_sges_in_chain_message;
	u16 chains_needed_per_io;
	u16 chain_offset_value_for_main_message;
	u16 chain_depth;

	/* sense */
	u8 *sense;
	dma_addr_t sense_dma;
	struct dma_pool *sense_dma_pool;

	/* reply */
	u16 reply_sz;
	u8 *reply;
	dma_addr_t reply_dma;
	struct dma_pool *reply_dma_pool;

	/* reply free queue */
	u16 reply_free_queue_depth;
	u32 *reply_free;		/* pool for reply free queue (32 bit addr) */
	dma_addr_t reply_free_dma;
	struct dma_pool *reply_free_dma_pool;
	u32 reply_free_host_index;	/* tail index in pool to insert free replys */

	/* reply post queue */
	u16 reply_post_queue_depth;
	Mpi2ReplyDescriptorsUnion_t *reply_post_free;
	dma_addr_t reply_post_free_dma;
	struct dma_pool *reply_post_free_dma_pool;
	u32 reply_post_host_index;	/* head index where FW completes IO */

	/* diag buffer support */
	u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT];
	u32 diag_buffer_sz[MPI2_DIAG_BUF_TYPE_COUNT];
	dma_addr_t diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT];
	u8 diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT];
	u32 unique_id[MPI2_DIAG_BUF_TYPE_COUNT];
	u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23];
	u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
};
644
/*
 * Completion callback prototype — all the *_done handlers declared below
 * share this signature and are registered via
 * mpt2sas_base_register_callback_handler().
 */
typedef void (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID,
    u32 reply);


/* base shared API */
extern struct list_head mpt2sas_ioc_list;

int mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc);
void mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc);
int mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc);
void mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc);
int mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
    enum reset_type type);

/* message frame / sense buffer accessors, keyed by smid */
void *mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid);
void *mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid);
void mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr);
dma_addr_t mpt2sas_base_get_msg_frame_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid);
dma_addr_t mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid);

/* smid (system message index) allocation and request posting */
u16 mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx);
void mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid);
void mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 vf_id,
    u16 handle);
void mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 vf_id);
void mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
    u8 vf_id, u16 io_index);
void mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 vf_id);
void mpt2sas_base_initialize_callback_handler(void);
u8 mpt2sas_base_register_callback_handler(MPT_CALLBACK cb_func);
void mpt2sas_base_release_callback_handler(u8 cb_idx);

void mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply);
void *mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr);

u32 mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked);

void mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc, u16 fault_code);
int mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
    Mpi2SasIoUnitControlReply_t *mpi_reply, Mpi2SasIoUnitControlRequest_t
    *mpi_request);
int mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
    Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request);
void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type);

/* scsih shared API */
void mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
    u8 type, u16 smid_task, ulong timeout);
void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc,
    u16 handle);
struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER
    *ioc, u64 sas_address);
struct _sas_device *mpt2sas_scsih_sas_device_find_by_sas_address(
    struct MPT2SAS_ADAPTER *ioc, u64 sas_address);

void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 reply);
void mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase);

/* config shared API */
void mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply);
int mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys);
int mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
    Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page);
int mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2BiosPage2_t *config_page);
int mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2BiosPage3_t *config_page);
int mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2IOUnitPage0_t *config_page);
int mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2SasDevicePage0_t *config_page, u32 form, u32 handle);
int mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2SasDevicePage1_t *config_page, u32 form, u32 handle);
int mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, u16 sz);
int mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2IOUnitPage1_t *config_page);
int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2IOUnitPage1_t config_page);
int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2IOCPage8_t *config_page);
int mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2ExpanderPage0_t *config_page, u32 form, u32 handle);
int mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2ExpanderPage1_t *config_page, u32 phy_number, u16 handle);
int mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, u32 form, u32 handle);
int mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number);
int mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number);
int mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form, u32 handle);
int mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 *num_pds);
int mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form, u32 handle, u16 sz);
int mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
    *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, u32 form,
    u32 form_specific);
int mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
    u16 *volume_handle);
int mpt2sas_config_get_volume_wwid(struct MPT2SAS_ADAPTER *ioc, u16 volume_handle,
    u64 *wwid);

/* ctl shared API */
extern struct device_attribute *mpt2sas_host_attrs[];
extern struct device_attribute *mpt2sas_dev_attrs[];
void mpt2sas_ctl_init(void);
void mpt2sas_ctl_exit(void);
void mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply);
void mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase);
void mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 reply);
void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
    Mpi2EventNotificationReply_t *mpi_reply);

/* transport shared API */
void mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply);
struct _sas_port *mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc,
    u16 handle, u16 parent_handle);
void mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
    u16 parent_handle);
int mpt2sas_transport_add_host_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
    *mpt2sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev);
int mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
    *mpt2sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev);
void mpt2sas_transport_update_phy_link_change(struct MPT2SAS_ADAPTER *ioc, u16 handle,
    u16 attached_handle, u8 phy_number, u8 link_rate);
extern struct sas_function_template mpt2sas_transport_functions;
extern struct scsi_transport_template *mpt2sas_transport_template;

#endif /* MPT2SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
new file mode 100644
index 000000000000..58cfb97846f7
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -0,0 +1,1873 @@
1/*
2 * This module provides common API for accessing firmware configuration pages
3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
5 * Copyright (C) 2007-2008 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include <linux/version.h>
45#include <linux/module.h>
46#include <linux/kernel.h>
47#include <linux/init.h>
48#include <linux/errno.h>
49#include <linux/blkdev.h>
50#include <linux/sched.h>
51#include <linux/workqueue.h>
52#include <linux/delay.h>
53#include <linux/pci.h>
54
55#include "mpt2sas_base.h"
56
/* local definitions */

/* Timeout for config page request (in seconds) */
#define MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT 15

/* Common sgl flags for READING a config page. */
#define MPT2_CONFIG_COMMON_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \
    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \
    | MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT)

/* Common sgl flags for WRITING a config page:
 * same as the READ flags plus the host-to-ioc direction bit. */
#define MPT2_CONFIG_COMMON_WRITE_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \
    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \
    | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC) \
    << MPI2_SGE_FLAGS_SHIFT)

/**
 * struct config_request - obtain dma memory via routine
 * @config_page_sz: size (in bytes)
 * @config_page: virt pointer
 * @config_page_dma: phys pointer
 *
 * Describes the DMA buffer used for one config page transfer; filled in
 * either from the adapter's pre-allocated page or by
 * _config_alloc_config_dma_memory().
 */
struct config_request{
	u16 config_page_sz;
	void *config_page;
	dma_addr_t config_page_dma;
};
85
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
/**
 * _config_display_some_debug - debug routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @calling_function_name: string pass from calling function
 * @mpi_reply: reply message frame (may be NULL for the request side)
 * Context: none.
 *
 * Function for displaying debug info helpful when debugging issues
 * in this module.  No-op unless MPT_DEBUG_CONFIG logging is enabled.
 */
static void
_config_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid,
    char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
{
	Mpi2ConfigRequest_t *mpi_request;
	char *desc = NULL;

	if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
		return;

	/* map the request's page type to a human readable tag */
	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
	switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) {
	case MPI2_CONFIG_PAGETYPE_IO_UNIT:
		desc = "io_unit";
		break;
	case MPI2_CONFIG_PAGETYPE_IOC:
		desc = "ioc";
		break;
	case MPI2_CONFIG_PAGETYPE_BIOS:
		desc = "bios";
		break;
	case MPI2_CONFIG_PAGETYPE_RAID_VOLUME:
		desc = "raid_volume";
		break;
	case MPI2_CONFIG_PAGETYPE_MANUFACTURING:
		desc = "manufacturing";	/* was misspelled "manufaucturing" */
		break;
	case MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK:
		desc = "physdisk";
		break;
	case MPI2_CONFIG_PAGETYPE_EXTENDED:
		switch (mpi_request->ExtPageType) {
		case MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT:
			desc = "sas_io_unit";
			break;
		case MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER:
			desc = "sas_expander";
			break;
		case MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE:
			desc = "sas_device";
			break;
		case MPI2_CONFIG_EXTPAGETYPE_SAS_PHY:
			desc = "sas_phy";
			break;
		case MPI2_CONFIG_EXTPAGETYPE_LOG:
			desc = "log";
			break;
		case MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE:
			desc = "enclosure";
			break;
		case MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG:
			desc = "raid_config";
			break;
		case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING:
			desc = "driver_mapping";	/* was "driver_mappping" */
			break;
		}
		break;
	}

	/* unknown page type: print nothing */
	if (!desc)
		return;

	printk(MPT2SAS_DEBUG_FMT "%s: %s(%d), action(%d), form(0x%08x), "
	    "smid(%d)\n", ioc->name, calling_function_name, desc,
	    mpi_request->Header.PageNumber, mpi_request->Action,
	    le32_to_cpu(mpi_request->PageAddress), smid);

	if (!mpi_reply)
		return;

	/* only print status/loginfo when the firmware reported something */
	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
		printk(MPT2SAS_DEBUG_FMT
		    "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
		    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
		    le32_to_cpu(mpi_reply->IOCLogInfo));
}
#endif
176
/**
 * mpt2sas_config_done - config page completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @VF_ID: virtual function id
 * @reply: reply message frame(lower 32bit addr)
 * Context: none.
 *
 * The callback handler when using _config_request.  Copies the reply into
 * ioc->config_cmds.reply, updates the status flags and wakes the waiter.
 *
 * Return nothing.
 */
void
mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	/* ignore completions when no config request is outstanding ... */
	if (ioc->config_cmds.status == MPT2_CMD_NOT_USED)
		return;
	/* ... or when the smid doesn't match the outstanding request */
	if (ioc->config_cmds.smid != smid)
		return;
	ioc->config_cmds.status |= MPT2_CMD_COMPLETE;
	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		ioc->config_cmds.status |= MPT2_CMD_REPLY_VALID;
		/* MsgLength is in 4-byte (dword) units */
		memcpy(ioc->config_cmds.reply, mpi_reply,
		    mpi_reply->MsgLength*4);
	}
	/* clear PENDING before waking the waiter in _config_request() */
	ioc->config_cmds.status &= ~MPT2_CMD_PENDING;
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	_config_display_some_debug(ioc, smid, "config_done", mpi_reply);
#endif
	complete(&ioc->config_cmds.done);
}
211
212/**
213 * _config_request - main routine for sending config page requests
214 * @ioc: per adapter object
215 * @mpi_request: request message frame
216 * @mpi_reply: reply mf payload returned from firmware
217 * @timeout: timeout in seconds
218 * Context: sleep, the calling function needs to acquire the config_cmds.mutex
219 *
220 * A generic API for config page requests to firmware.
221 *
222 * The ioc->config_cmds.status flag should be MPT2_CMD_NOT_USED before calling
223 * this API.
224 *
225 * The callback index is set inside `ioc->config_cb_idx.
226 *
227 * Returns 0 for success, non-zero for failure.
228 */
229static int
230_config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
231 *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout)
232{
233 u16 smid;
234 u32 ioc_state;
235 unsigned long timeleft;
236 Mpi2ConfigRequest_t *config_request;
237 int r;
238 u8 retry_count;
239 u8 issue_reset;
240 u16 wait_state_count;
241
242 if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) {
243 printk(MPT2SAS_ERR_FMT "%s: config_cmd in use\n",
244 ioc->name, __func__);
245 return -EAGAIN;
246 }
247 retry_count = 0;
248
249 retry_config:
250 wait_state_count = 0;
251 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
252 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
253 if (wait_state_count++ == MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT) {
254 printk(MPT2SAS_ERR_FMT
255 "%s: failed due to ioc not operational\n",
256 ioc->name, __func__);
257 ioc->config_cmds.status = MPT2_CMD_NOT_USED;
258 return -EFAULT;
259 }
260 ssleep(1);
261 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
262 printk(MPT2SAS_INFO_FMT "%s: waiting for "
263 "operational state(count=%d)\n", ioc->name,
264 __func__, wait_state_count);
265 }
266 if (wait_state_count)
267 printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
268 ioc->name, __func__);
269
270 smid = mpt2sas_base_get_smid(ioc, ioc->config_cb_idx);
271 if (!smid) {
272 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
273 ioc->name, __func__);
274 ioc->config_cmds.status = MPT2_CMD_NOT_USED;
275 return -EAGAIN;
276 }
277
278 r = 0;
279 memset(mpi_reply, 0, sizeof(Mpi2ConfigReply_t));
280 ioc->config_cmds.status = MPT2_CMD_PENDING;
281 config_request = mpt2sas_base_get_msg_frame(ioc, smid);
282 ioc->config_cmds.smid = smid;
283 memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
284#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
285 _config_display_some_debug(ioc, smid, "config_request", NULL);
286#endif
287 mpt2sas_base_put_smid_default(ioc, smid, config_request->VF_ID);
288 timeleft = wait_for_completion_timeout(&ioc->config_cmds.done,
289 timeout*HZ);
290 if (!(ioc->config_cmds.status & MPT2_CMD_COMPLETE)) {
291 printk(MPT2SAS_ERR_FMT "%s: timeout\n",
292 ioc->name, __func__);
293 _debug_dump_mf(mpi_request,
294 sizeof(Mpi2ConfigRequest_t)/4);
295 if (!(ioc->config_cmds.status & MPT2_CMD_RESET))
296 issue_reset = 1;
297 goto issue_host_reset;
298 }
299 if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID)
300 memcpy(mpi_reply, ioc->config_cmds.reply,
301 sizeof(Mpi2ConfigReply_t));
302 if (retry_count)
303 printk(MPT2SAS_INFO_FMT "%s: retry completed!!\n",
304 ioc->name, __func__);
305 ioc->config_cmds.status = MPT2_CMD_NOT_USED;
306 return r;
307
308 issue_host_reset:
309 if (issue_reset)
310 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
311 FORCE_BIG_HAMMER);
312 ioc->config_cmds.status = MPT2_CMD_NOT_USED;
313 if (!retry_count) {
314 printk(MPT2SAS_INFO_FMT "%s: attempting retry\n",
315 ioc->name, __func__);
316 retry_count++;
317 goto retry_config;
318 }
319 return -EFAULT;
320}
321
322/**
323 * _config_alloc_config_dma_memory - obtain physical memory
324 * @ioc: per adapter object
325 * @mem: struct config_request
326 *
327 * A wrapper for obtaining dma-able memory for config page request.
328 *
329 * Returns 0 for success, non-zero for failure.
330 */
331static int
332_config_alloc_config_dma_memory(struct MPT2SAS_ADAPTER *ioc,
333 struct config_request *mem)
334{
335 int r = 0;
336
337 mem->config_page = pci_alloc_consistent(ioc->pdev, mem->config_page_sz,
338 &mem->config_page_dma);
339 if (!mem->config_page)
340 r = -ENOMEM;
341 return r;
342}
343
/**
 * _config_free_config_dma_memory - wrapper to free the memory
 * @ioc: per adapter object
 * @mem: struct config_request previously filled by
 *       _config_alloc_config_dma_memory
 *
 * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory.
 *
 * Return nothing.
 */
static void
_config_free_config_dma_memory(struct MPT2SAS_ADAPTER *ioc,
    struct config_request *mem)
{
	pci_free_consistent(ioc->pdev, mem->config_page_sz, mem->config_page,
	    mem->config_page_dma);
}
360
361/**
362 * mpt2sas_config_get_manufacturing_pg0 - obtain manufacturing page 0
363 * @ioc: per adapter object
364 * @mpi_reply: reply mf payload returned from firmware
365 * @config_page: contents of the config page
366 * Context: sleep.
367 *
368 * Returns 0 for success, non-zero for failure.
369 */
370int
371mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
372 Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page)
373{
374 Mpi2ConfigRequest_t mpi_request;
375 int r;
376 struct config_request mem;
377
378 mutex_lock(&ioc->config_cmds.mutex);
379 memset(config_page, 0, sizeof(Mpi2ManufacturingPage0_t));
380 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
381 mpi_request.Function = MPI2_FUNCTION_CONFIG;
382 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
383 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
384 mpi_request.Header.PageNumber = 0;
385 mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
386 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
387 r = _config_request(ioc, &mpi_request, mpi_reply,
388 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
389 if (r)
390 goto out;
391
392 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
393 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
394 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
395 mpi_request.Header.PageType = mpi_reply->Header.PageType;
396 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
397 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
398 if (mem.config_page_sz > ioc->config_page_sz) {
399 r = _config_alloc_config_dma_memory(ioc, &mem);
400 if (r)
401 goto out;
402 } else {
403 mem.config_page_dma = ioc->config_page_dma;
404 mem.config_page = ioc->config_page;
405 }
406 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
407 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
408 mem.config_page_dma);
409 r = _config_request(ioc, &mpi_request, mpi_reply,
410 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
411 if (!r)
412 memcpy(config_page, mem.config_page,
413 min_t(u16, mem.config_page_sz,
414 sizeof(Mpi2ManufacturingPage0_t)));
415
416 if (mem.config_page_sz > ioc->config_page_sz)
417 _config_free_config_dma_memory(ioc, &mem);
418
419 out:
420 mutex_unlock(&ioc->config_cmds.mutex);
421 return r;
422}
423
424/**
425 * mpt2sas_config_get_bios_pg2 - obtain bios page 2
426 * @ioc: per adapter object
427 * @mpi_reply: reply mf payload returned from firmware
428 * @config_page: contents of the config page
429 * Context: sleep.
430 *
431 * Returns 0 for success, non-zero for failure.
432 */
433int
434mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc,
435 Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage2_t *config_page)
436{
437 Mpi2ConfigRequest_t mpi_request;
438 int r;
439 struct config_request mem;
440
441 mutex_lock(&ioc->config_cmds.mutex);
442 memset(config_page, 0, sizeof(Mpi2BiosPage2_t));
443 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
444 mpi_request.Function = MPI2_FUNCTION_CONFIG;
445 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
446 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
447 mpi_request.Header.PageNumber = 2;
448 mpi_request.Header.PageVersion = MPI2_BIOSPAGE2_PAGEVERSION;
449 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
450 r = _config_request(ioc, &mpi_request, mpi_reply,
451 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
452 if (r)
453 goto out;
454
455 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
456 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
457 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
458 mpi_request.Header.PageType = mpi_reply->Header.PageType;
459 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
460 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
461 if (mem.config_page_sz > ioc->config_page_sz) {
462 r = _config_alloc_config_dma_memory(ioc, &mem);
463 if (r)
464 goto out;
465 } else {
466 mem.config_page_dma = ioc->config_page_dma;
467 mem.config_page = ioc->config_page;
468 }
469 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
470 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
471 mem.config_page_dma);
472 r = _config_request(ioc, &mpi_request, mpi_reply,
473 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
474 if (!r)
475 memcpy(config_page, mem.config_page,
476 min_t(u16, mem.config_page_sz,
477 sizeof(Mpi2BiosPage2_t)));
478
479 if (mem.config_page_sz > ioc->config_page_sz)
480 _config_free_config_dma_memory(ioc, &mem);
481
482 out:
483 mutex_unlock(&ioc->config_cmds.mutex);
484 return r;
485}
486
487/**
488 * mpt2sas_config_get_bios_pg3 - obtain bios page 3
489 * @ioc: per adapter object
490 * @mpi_reply: reply mf payload returned from firmware
491 * @config_page: contents of the config page
492 * Context: sleep.
493 *
494 * Returns 0 for success, non-zero for failure.
495 */
496int
497mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
498 *mpi_reply, Mpi2BiosPage3_t *config_page)
499{
500 Mpi2ConfigRequest_t mpi_request;
501 int r;
502 struct config_request mem;
503
504 mutex_lock(&ioc->config_cmds.mutex);
505 memset(config_page, 0, sizeof(Mpi2BiosPage3_t));
506 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
507 mpi_request.Function = MPI2_FUNCTION_CONFIG;
508 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
509 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
510 mpi_request.Header.PageNumber = 3;
511 mpi_request.Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION;
512 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
513 r = _config_request(ioc, &mpi_request, mpi_reply,
514 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
515 if (r)
516 goto out;
517
518 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
519 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
520 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
521 mpi_request.Header.PageType = mpi_reply->Header.PageType;
522 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
523 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
524 if (mem.config_page_sz > ioc->config_page_sz) {
525 r = _config_alloc_config_dma_memory(ioc, &mem);
526 if (r)
527 goto out;
528 } else {
529 mem.config_page_dma = ioc->config_page_dma;
530 mem.config_page = ioc->config_page;
531 }
532 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
533 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
534 mem.config_page_dma);
535 r = _config_request(ioc, &mpi_request, mpi_reply,
536 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
537 if (!r)
538 memcpy(config_page, mem.config_page,
539 min_t(u16, mem.config_page_sz,
540 sizeof(Mpi2BiosPage3_t)));
541
542 if (mem.config_page_sz > ioc->config_page_sz)
543 _config_free_config_dma_memory(ioc, &mem);
544
545 out:
546 mutex_unlock(&ioc->config_cmds.mutex);
547 return r;
548}
549
550/**
551 * mpt2sas_config_get_iounit_pg0 - obtain iounit page 0
552 * @ioc: per adapter object
553 * @mpi_reply: reply mf payload returned from firmware
554 * @config_page: contents of the config page
555 * Context: sleep.
556 *
557 * Returns 0 for success, non-zero for failure.
558 */
559int
560mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc,
561 Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage0_t *config_page)
562{
563 Mpi2ConfigRequest_t mpi_request;
564 int r;
565 struct config_request mem;
566
567 mutex_lock(&ioc->config_cmds.mutex);
568 memset(config_page, 0, sizeof(Mpi2IOUnitPage0_t));
569 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
570 mpi_request.Function = MPI2_FUNCTION_CONFIG;
571 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
572 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
573 mpi_request.Header.PageNumber = 0;
574 mpi_request.Header.PageVersion = MPI2_IOUNITPAGE0_PAGEVERSION;
575 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
576 r = _config_request(ioc, &mpi_request, mpi_reply,
577 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
578 if (r)
579 goto out;
580
581 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
582 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
583 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
584 mpi_request.Header.PageType = mpi_reply->Header.PageType;
585 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
586 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
587 if (mem.config_page_sz > ioc->config_page_sz) {
588 r = _config_alloc_config_dma_memory(ioc, &mem);
589 if (r)
590 goto out;
591 } else {
592 mem.config_page_dma = ioc->config_page_dma;
593 mem.config_page = ioc->config_page;
594 }
595 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
596 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
597 mem.config_page_dma);
598 r = _config_request(ioc, &mpi_request, mpi_reply,
599 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
600 if (!r)
601 memcpy(config_page, mem.config_page,
602 min_t(u16, mem.config_page_sz,
603 sizeof(Mpi2IOUnitPage0_t)));
604
605 if (mem.config_page_sz > ioc->config_page_sz)
606 _config_free_config_dma_memory(ioc, &mem);
607
608 out:
609 mutex_unlock(&ioc->config_cmds.mutex);
610 return r;
611}
612
613/**
614 * mpt2sas_config_get_iounit_pg1 - obtain iounit page 1
615 * @ioc: per adapter object
616 * @mpi_reply: reply mf payload returned from firmware
617 * @config_page: contents of the config page
618 * Context: sleep.
619 *
620 * Returns 0 for success, non-zero for failure.
621 */
622int
623mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
624 Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page)
625{
626 Mpi2ConfigRequest_t mpi_request;
627 int r;
628 struct config_request mem;
629
630 mutex_lock(&ioc->config_cmds.mutex);
631 memset(config_page, 0, sizeof(Mpi2IOUnitPage1_t));
632 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
633 mpi_request.Function = MPI2_FUNCTION_CONFIG;
634 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
635 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
636 mpi_request.Header.PageNumber = 1;
637 mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION;
638 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
639 r = _config_request(ioc, &mpi_request, mpi_reply,
640 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
641 if (r)
642 goto out;
643
644 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
645 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
646 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
647 mpi_request.Header.PageType = mpi_reply->Header.PageType;
648 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
649 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
650 if (mem.config_page_sz > ioc->config_page_sz) {
651 r = _config_alloc_config_dma_memory(ioc, &mem);
652 if (r)
653 goto out;
654 } else {
655 mem.config_page_dma = ioc->config_page_dma;
656 mem.config_page = ioc->config_page;
657 }
658 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
659 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
660 mem.config_page_dma);
661 r = _config_request(ioc, &mpi_request, mpi_reply,
662 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
663 if (!r)
664 memcpy(config_page, mem.config_page,
665 min_t(u16, mem.config_page_sz,
666 sizeof(Mpi2IOUnitPage1_t)));
667
668 if (mem.config_page_sz > ioc->config_page_sz)
669 _config_free_config_dma_memory(ioc, &mem);
670
671 out:
672 mutex_unlock(&ioc->config_cmds.mutex);
673 return r;
674}
675
676/**
677 * mpt2sas_config_set_iounit_pg1 - set iounit page 1
678 * @ioc: per adapter object
679 * @mpi_reply: reply mf payload returned from firmware
680 * @config_page: contents of the config page
681 * Context: sleep.
682 *
683 * Returns 0 for success, non-zero for failure.
684 */
685int
686mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
687 Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t config_page)
688{
689 Mpi2ConfigRequest_t mpi_request;
690 int r;
691 struct config_request mem;
692
693 mutex_lock(&ioc->config_cmds.mutex);
694 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
695 mpi_request.Function = MPI2_FUNCTION_CONFIG;
696 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
697 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
698 mpi_request.Header.PageNumber = 1;
699 mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION;
700 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
701 r = _config_request(ioc, &mpi_request, mpi_reply,
702 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
703 if (r)
704 goto out;
705
706 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
707 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
708 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
709 mpi_request.Header.PageType = mpi_reply->Header.PageType;
710 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
711 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
712 if (mem.config_page_sz > ioc->config_page_sz) {
713 r = _config_alloc_config_dma_memory(ioc, &mem);
714 if (r)
715 goto out;
716 } else {
717 mem.config_page_dma = ioc->config_page_dma;
718 mem.config_page = ioc->config_page;
719 }
720
721 memset(mem.config_page, 0, mem.config_page_sz);
722 memcpy(mem.config_page, &config_page,
723 sizeof(Mpi2IOUnitPage1_t));
724
725 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
726 MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.config_page_sz,
727 mem.config_page_dma);
728 r = _config_request(ioc, &mpi_request, mpi_reply,
729 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
730
731 if (mem.config_page_sz > ioc->config_page_sz)
732 _config_free_config_dma_memory(ioc, &mem);
733
734 out:
735 mutex_unlock(&ioc->config_cmds.mutex);
736 return r;
737}
738
739/**
740 * mpt2sas_config_get_ioc_pg8 - obtain ioc page 8
741 * @ioc: per adapter object
742 * @mpi_reply: reply mf payload returned from firmware
743 * @config_page: contents of the config page
744 * Context: sleep.
745 *
746 * Returns 0 for success, non-zero for failure.
747 */
748int
749mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc,
750 Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage8_t *config_page)
751{
752 Mpi2ConfigRequest_t mpi_request;
753 int r;
754 struct config_request mem;
755
756 mutex_lock(&ioc->config_cmds.mutex);
757 memset(config_page, 0, sizeof(Mpi2IOCPage8_t));
758 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
759 mpi_request.Function = MPI2_FUNCTION_CONFIG;
760 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
761 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
762 mpi_request.Header.PageNumber = 8;
763 mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
764 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
765 r = _config_request(ioc, &mpi_request, mpi_reply,
766 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
767 if (r)
768 goto out;
769
770 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
771 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
772 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
773 mpi_request.Header.PageType = mpi_reply->Header.PageType;
774 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
775 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
776 if (mem.config_page_sz > ioc->config_page_sz) {
777 r = _config_alloc_config_dma_memory(ioc, &mem);
778 if (r)
779 goto out;
780 } else {
781 mem.config_page_dma = ioc->config_page_dma;
782 mem.config_page = ioc->config_page;
783 }
784 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
785 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
786 mem.config_page_dma);
787 r = _config_request(ioc, &mpi_request, mpi_reply,
788 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
789 if (!r)
790 memcpy(config_page, mem.config_page,
791 min_t(u16, mem.config_page_sz,
792 sizeof(Mpi2IOCPage8_t)));
793
794 if (mem.config_page_sz > ioc->config_page_sz)
795 _config_free_config_dma_memory(ioc, &mem);
796
797 out:
798 mutex_unlock(&ioc->config_cmds.mutex);
799 return r;
800}
801
802/**
803 * mpt2sas_config_get_sas_device_pg0 - obtain sas device page 0
804 * @ioc: per adapter object
805 * @mpi_reply: reply mf payload returned from firmware
806 * @config_page: contents of the config page
807 * @form: GET_NEXT_HANDLE or HANDLE
808 * @handle: device handle
809 * Context: sleep.
810 *
811 * Returns 0 for success, non-zero for failure.
812 */
813int
814mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
815 *mpi_reply, Mpi2SasDevicePage0_t *config_page, u32 form, u32 handle)
816{
817 Mpi2ConfigRequest_t mpi_request;
818 int r;
819 struct config_request mem;
820
821 mutex_lock(&ioc->config_cmds.mutex);
822 memset(config_page, 0, sizeof(Mpi2SasDevicePage0_t));
823 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
824 mpi_request.Function = MPI2_FUNCTION_CONFIG;
825 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
826 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
827 mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
828 mpi_request.Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION;
829 mpi_request.Header.PageNumber = 0;
830 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
831 r = _config_request(ioc, &mpi_request, mpi_reply,
832 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
833 if (r)
834 goto out;
835
836 mpi_request.PageAddress = cpu_to_le32(form | handle);
837 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
838 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
839 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
840 mpi_request.Header.PageType = mpi_reply->Header.PageType;
841 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
842 mpi_request.ExtPageType = mpi_reply->ExtPageType;
843 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
844 if (mem.config_page_sz > ioc->config_page_sz) {
845 r = _config_alloc_config_dma_memory(ioc, &mem);
846 if (r)
847 goto out;
848 } else {
849 mem.config_page_dma = ioc->config_page_dma;
850 mem.config_page = ioc->config_page;
851 }
852 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
853 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
854 mem.config_page_dma);
855 r = _config_request(ioc, &mpi_request, mpi_reply,
856 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
857 if (!r)
858 memcpy(config_page, mem.config_page,
859 min_t(u16, mem.config_page_sz,
860 sizeof(Mpi2SasDevicePage0_t)));
861
862 if (mem.config_page_sz > ioc->config_page_sz)
863 _config_free_config_dma_memory(ioc, &mem);
864
865 out:
866 mutex_unlock(&ioc->config_cmds.mutex);
867 return r;
868}
869
870/**
871 * mpt2sas_config_get_sas_device_pg1 - obtain sas device page 1
872 * @ioc: per adapter object
873 * @mpi_reply: reply mf payload returned from firmware
874 * @config_page: contents of the config page
875 * @form: GET_NEXT_HANDLE or HANDLE
876 * @handle: device handle
877 * Context: sleep.
878 *
879 * Returns 0 for success, non-zero for failure.
880 */
881int
882mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
883 *mpi_reply, Mpi2SasDevicePage1_t *config_page, u32 form, u32 handle)
884{
885 Mpi2ConfigRequest_t mpi_request;
886 int r;
887 struct config_request mem;
888
889 mutex_lock(&ioc->config_cmds.mutex);
890 memset(config_page, 0, sizeof(Mpi2SasDevicePage1_t));
891 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
892 mpi_request.Function = MPI2_FUNCTION_CONFIG;
893 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
894 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
895 mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
896 mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION;
897 mpi_request.Header.PageNumber = 1;
898 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
899 r = _config_request(ioc, &mpi_request, mpi_reply,
900 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
901 if (r)
902 goto out;
903
904 mpi_request.PageAddress = cpu_to_le32(form | handle);
905 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
906 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
907 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
908 mpi_request.Header.PageType = mpi_reply->Header.PageType;
909 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
910 mpi_request.ExtPageType = mpi_reply->ExtPageType;
911 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
912 if (mem.config_page_sz > ioc->config_page_sz) {
913 r = _config_alloc_config_dma_memory(ioc, &mem);
914 if (r)
915 goto out;
916 } else {
917 mem.config_page_dma = ioc->config_page_dma;
918 mem.config_page = ioc->config_page;
919 }
920 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
921 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
922 mem.config_page_dma);
923 r = _config_request(ioc, &mpi_request, mpi_reply,
924 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
925 if (!r)
926 memcpy(config_page, mem.config_page,
927 min_t(u16, mem.config_page_sz,
928 sizeof(Mpi2SasDevicePage1_t)));
929
930 if (mem.config_page_sz > ioc->config_page_sz)
931 _config_free_config_dma_memory(ioc, &mem);
932
933 out:
934 mutex_unlock(&ioc->config_cmds.mutex);
935 return r;
936}
937
938/**
939 * mpt2sas_config_get_number_hba_phys - obtain number of phys on the host
940 * @ioc: per adapter object
941 * @num_phys: pointer returned with the number of phys
942 * Context: sleep.
943 *
944 * Returns 0 for success, non-zero for failure.
945 */
946int
947mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys)
948{
949 Mpi2ConfigRequest_t mpi_request;
950 int r;
951 struct config_request mem;
952 u16 ioc_status;
953 Mpi2ConfigReply_t mpi_reply;
954 Mpi2SasIOUnitPage0_t config_page;
955
956 mutex_lock(&ioc->config_cmds.mutex);
957 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
958 mpi_request.Function = MPI2_FUNCTION_CONFIG;
959 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
960 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
961 mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
962 mpi_request.Header.PageNumber = 0;
963 mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
964 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
965 r = _config_request(ioc, &mpi_request, &mpi_reply,
966 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
967 if (r)
968 goto out;
969
970 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
971 mpi_request.Header.PageVersion = mpi_reply.Header.PageVersion;
972 mpi_request.Header.PageNumber = mpi_reply.Header.PageNumber;
973 mpi_request.Header.PageType = mpi_reply.Header.PageType;
974 mpi_request.ExtPageLength = mpi_reply.ExtPageLength;
975 mpi_request.ExtPageType = mpi_reply.ExtPageType;
976 mem.config_page_sz = le16_to_cpu(mpi_reply.ExtPageLength) * 4;
977 if (mem.config_page_sz > ioc->config_page_sz) {
978 r = _config_alloc_config_dma_memory(ioc, &mem);
979 if (r)
980 goto out;
981 } else {
982 mem.config_page_dma = ioc->config_page_dma;
983 mem.config_page = ioc->config_page;
984 }
985 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
986 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
987 mem.config_page_dma);
988 r = _config_request(ioc, &mpi_request, &mpi_reply,
989 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
990 if (!r) {
991 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
992 MPI2_IOCSTATUS_MASK;
993 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
994 memcpy(&config_page, mem.config_page,
995 min_t(u16, mem.config_page_sz,
996 sizeof(Mpi2SasIOUnitPage0_t)));
997 *num_phys = config_page.NumPhys;
998 }
999 }
1000
1001 if (mem.config_page_sz > ioc->config_page_sz)
1002 _config_free_config_dma_memory(ioc, &mem);
1003
1004 out:
1005 mutex_unlock(&ioc->config_cmds.mutex);
1006 return r;
1007}
1008
1009/**
1010 * mpt2sas_config_get_sas_iounit_pg0 - obtain sas iounit page 0
1011 * @ioc: per adapter object
1012 * @mpi_reply: reply mf payload returned from firmware
1013 * @config_page: contents of the config page
1014 * @sz: size of buffer passed in config_page
1015 * Context: sleep.
1016 *
1017 * Calling function should call config_get_number_hba_phys prior to
1018 * this function, so enough memory is allocated for config_page.
1019 *
1020 * Returns 0 for success, non-zero for failure.
1021 */
1022int
1023mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1024 *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, u16 sz)
1025{
1026 Mpi2ConfigRequest_t mpi_request;
1027 int r;
1028 struct config_request mem;
1029
1030 mutex_lock(&ioc->config_cmds.mutex);
1031 memset(config_page, 0, sz);
1032 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1033 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1034 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1035 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
1036 mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
1037 mpi_request.Header.PageNumber = 0;
1038 mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
1039 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1040 r = _config_request(ioc, &mpi_request, mpi_reply,
1041 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1042 if (r)
1043 goto out;
1044
1045 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1046 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1047 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1048 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1049 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
1050 mpi_request.ExtPageType = mpi_reply->ExtPageType;
1051 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
1052 if (mem.config_page_sz > ioc->config_page_sz) {
1053 r = _config_alloc_config_dma_memory(ioc, &mem);
1054 if (r)
1055 goto out;
1056 } else {
1057 mem.config_page_dma = ioc->config_page_dma;
1058 mem.config_page = ioc->config_page;
1059 }
1060 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1061 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1062 mem.config_page_dma);
1063 r = _config_request(ioc, &mpi_request, mpi_reply,
1064 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1065 if (!r)
1066 memcpy(config_page, mem.config_page,
1067 min_t(u16, sz, mem.config_page_sz));
1068
1069 if (mem.config_page_sz > ioc->config_page_sz)
1070 _config_free_config_dma_memory(ioc, &mem);
1071
1072 out:
1073 mutex_unlock(&ioc->config_cmds.mutex);
1074 return r;
1075}
1076
1077/**
1078 * mpt2sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1
1079 * @ioc: per adapter object
1080 * @mpi_reply: reply mf payload returned from firmware
1081 * @config_page: contents of the config page
1082 * @sz: size of buffer passed in config_page
1083 * Context: sleep.
1084 *
1085 * Calling function should call config_get_number_hba_phys prior to
1086 * this function, so enough memory is allocated for config_page.
1087 *
1088 * Returns 0 for success, non-zero for failure.
1089 */
1090int
1091mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1092 *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz)
1093{
1094 Mpi2ConfigRequest_t mpi_request;
1095 int r;
1096 struct config_request mem;
1097
1098 mutex_lock(&ioc->config_cmds.mutex);
1099 memset(config_page, 0, sz);
1100 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1101 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1102 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1103 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
1104 mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
1105 mpi_request.Header.PageNumber = 1;
1106 mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
1107 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1108 r = _config_request(ioc, &mpi_request, mpi_reply,
1109 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1110 if (r)
1111 goto out;
1112
1113 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1114 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1115 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1116 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1117 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
1118 mpi_request.ExtPageType = mpi_reply->ExtPageType;
1119 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
1120 if (mem.config_page_sz > ioc->config_page_sz) {
1121 r = _config_alloc_config_dma_memory(ioc, &mem);
1122 if (r)
1123 goto out;
1124 } else {
1125 mem.config_page_dma = ioc->config_page_dma;
1126 mem.config_page = ioc->config_page;
1127 }
1128 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1129 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1130 mem.config_page_dma);
1131 r = _config_request(ioc, &mpi_request, mpi_reply,
1132 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1133 if (!r)
1134 memcpy(config_page, mem.config_page,
1135 min_t(u16, sz, mem.config_page_sz));
1136
1137 if (mem.config_page_sz > ioc->config_page_sz)
1138 _config_free_config_dma_memory(ioc, &mem);
1139
1140 out:
1141 mutex_unlock(&ioc->config_cmds.mutex);
1142 return r;
1143}
1144
1145/**
1146 * mpt2sas_config_get_expander_pg0 - obtain expander page 0
1147 * @ioc: per adapter object
1148 * @mpi_reply: reply mf payload returned from firmware
1149 * @config_page: contents of the config page
1150 * @form: GET_NEXT_HANDLE or HANDLE
1151 * @handle: expander handle
1152 * Context: sleep.
1153 *
1154 * Returns 0 for success, non-zero for failure.
1155 */
1156int
1157mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1158 *mpi_reply, Mpi2ExpanderPage0_t *config_page, u32 form, u32 handle)
1159{
1160 Mpi2ConfigRequest_t mpi_request;
1161 int r;
1162 struct config_request mem;
1163
1164 mutex_lock(&ioc->config_cmds.mutex);
1165 memset(config_page, 0, sizeof(Mpi2ExpanderPage0_t));
1166 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1167 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1168 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1169 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
1170 mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
1171 mpi_request.Header.PageNumber = 0;
1172 mpi_request.Header.PageVersion = MPI2_SASEXPANDER0_PAGEVERSION;
1173 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1174 r = _config_request(ioc, &mpi_request, mpi_reply,
1175 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1176 if (r)
1177 goto out;
1178
1179 mpi_request.PageAddress = cpu_to_le32(form | handle);
1180 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1181 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1182 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1183 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1184 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
1185 mpi_request.ExtPageType = mpi_reply->ExtPageType;
1186 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
1187 if (mem.config_page_sz > ioc->config_page_sz) {
1188 r = _config_alloc_config_dma_memory(ioc, &mem);
1189 if (r)
1190 goto out;
1191 } else {
1192 mem.config_page_dma = ioc->config_page_dma;
1193 mem.config_page = ioc->config_page;
1194 }
1195 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1196 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1197 mem.config_page_dma);
1198 r = _config_request(ioc, &mpi_request, mpi_reply,
1199 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1200 if (!r)
1201 memcpy(config_page, mem.config_page,
1202 min_t(u16, mem.config_page_sz,
1203 sizeof(Mpi2ExpanderPage0_t)));
1204
1205 if (mem.config_page_sz > ioc->config_page_sz)
1206 _config_free_config_dma_memory(ioc, &mem);
1207
1208 out:
1209 mutex_unlock(&ioc->config_cmds.mutex);
1210 return r;
1211}
1212
1213/**
1214 * mpt2sas_config_get_expander_pg1 - obtain expander page 1
1215 * @ioc: per adapter object
1216 * @mpi_reply: reply mf payload returned from firmware
1217 * @config_page: contents of the config page
1218 * @phy_number: phy number
1219 * @handle: expander handle
1220 * Context: sleep.
1221 *
1222 * Returns 0 for success, non-zero for failure.
1223 */
1224int
1225mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1226 *mpi_reply, Mpi2ExpanderPage1_t *config_page, u32 phy_number,
1227 u16 handle)
1228{
1229 Mpi2ConfigRequest_t mpi_request;
1230 int r;
1231 struct config_request mem;
1232
1233 mutex_lock(&ioc->config_cmds.mutex);
1234 memset(config_page, 0, sizeof(Mpi2ExpanderPage1_t));
1235 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1236 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1237 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1238 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
1239 mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
1240 mpi_request.Header.PageNumber = 1;
1241 mpi_request.Header.PageVersion = MPI2_SASEXPANDER1_PAGEVERSION;
1242 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1243 r = _config_request(ioc, &mpi_request, mpi_reply,
1244 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1245 if (r)
1246 goto out;
1247
1248 mpi_request.PageAddress =
1249 cpu_to_le32(MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM |
1250 (phy_number << MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle);
1251 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1252 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1253 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1254 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1255 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
1256 mpi_request.ExtPageType = mpi_reply->ExtPageType;
1257 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
1258 if (mem.config_page_sz > ioc->config_page_sz) {
1259 r = _config_alloc_config_dma_memory(ioc, &mem);
1260 if (r)
1261 goto out;
1262 } else {
1263 mem.config_page_dma = ioc->config_page_dma;
1264 mem.config_page = ioc->config_page;
1265 }
1266 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1267 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1268 mem.config_page_dma);
1269 r = _config_request(ioc, &mpi_request, mpi_reply,
1270 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1271 if (!r)
1272 memcpy(config_page, mem.config_page,
1273 min_t(u16, mem.config_page_sz,
1274 sizeof(Mpi2ExpanderPage1_t)));
1275
1276 if (mem.config_page_sz > ioc->config_page_sz)
1277 _config_free_config_dma_memory(ioc, &mem);
1278
1279 out:
1280 mutex_unlock(&ioc->config_cmds.mutex);
1281 return r;
1282}
1283
1284/**
1285 * mpt2sas_config_get_enclosure_pg0 - obtain enclosure page 0
1286 * @ioc: per adapter object
1287 * @mpi_reply: reply mf payload returned from firmware
1288 * @config_page: contents of the config page
1289 * @form: GET_NEXT_HANDLE or HANDLE
1290 * @handle: enclosure handle
1291 * Context: sleep.
1292 *
1293 * Returns 0 for success, non-zero for failure.
1294 */
1295int
1296mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1297 *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, u32 form, u32 handle)
1298{
1299 Mpi2ConfigRequest_t mpi_request;
1300 int r;
1301 struct config_request mem;
1302
1303 mutex_lock(&ioc->config_cmds.mutex);
1304 memset(config_page, 0, sizeof(Mpi2SasEnclosurePage0_t));
1305 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1306 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1307 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1308 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
1309 mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE;
1310 mpi_request.Header.PageNumber = 0;
1311 mpi_request.Header.PageVersion = MPI2_SASENCLOSURE0_PAGEVERSION;
1312 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1313 r = _config_request(ioc, &mpi_request, mpi_reply,
1314 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1315 if (r)
1316 goto out;
1317
1318 mpi_request.PageAddress = cpu_to_le32(form | handle);
1319 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1320 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1321 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1322 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1323 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
1324 mpi_request.ExtPageType = mpi_reply->ExtPageType;
1325 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
1326 if (mem.config_page_sz > ioc->config_page_sz) {
1327 r = _config_alloc_config_dma_memory(ioc, &mem);
1328 if (r)
1329 goto out;
1330 } else {
1331 mem.config_page_dma = ioc->config_page_dma;
1332 mem.config_page = ioc->config_page;
1333 }
1334 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1335 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1336 mem.config_page_dma);
1337 r = _config_request(ioc, &mpi_request, mpi_reply,
1338 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1339 if (!r)
1340 memcpy(config_page, mem.config_page,
1341 min_t(u16, mem.config_page_sz,
1342 sizeof(Mpi2SasEnclosurePage0_t)));
1343
1344 if (mem.config_page_sz > ioc->config_page_sz)
1345 _config_free_config_dma_memory(ioc, &mem);
1346
1347 out:
1348 mutex_unlock(&ioc->config_cmds.mutex);
1349 return r;
1350}
1351
1352/**
1353 * mpt2sas_config_get_phy_pg0 - obtain phy page 0
1354 * @ioc: per adapter object
1355 * @mpi_reply: reply mf payload returned from firmware
1356 * @config_page: contents of the config page
1357 * @phy_number: phy number
1358 * Context: sleep.
1359 *
1360 * Returns 0 for success, non-zero for failure.
1361 */
1362int
1363mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1364 *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number)
1365{
1366 Mpi2ConfigRequest_t mpi_request;
1367 int r;
1368 struct config_request mem;
1369
1370 mutex_lock(&ioc->config_cmds.mutex);
1371 memset(config_page, 0, sizeof(Mpi2SasPhyPage0_t));
1372 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1373 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1374 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1375 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
1376 mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY;
1377 mpi_request.Header.PageNumber = 0;
1378 mpi_request.Header.PageVersion = MPI2_SASPHY0_PAGEVERSION;
1379 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1380 r = _config_request(ioc, &mpi_request, mpi_reply,
1381 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1382 if (r)
1383 goto out;
1384
1385 mpi_request.PageAddress =
1386 cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
1387 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1388 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1389 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1390 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1391 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
1392 mpi_request.ExtPageType = mpi_reply->ExtPageType;
1393 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
1394 if (mem.config_page_sz > ioc->config_page_sz) {
1395 r = _config_alloc_config_dma_memory(ioc, &mem);
1396 if (r)
1397 goto out;
1398 } else {
1399 mem.config_page_dma = ioc->config_page_dma;
1400 mem.config_page = ioc->config_page;
1401 }
1402 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1403 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1404 mem.config_page_dma);
1405 r = _config_request(ioc, &mpi_request, mpi_reply,
1406 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1407 if (!r)
1408 memcpy(config_page, mem.config_page,
1409 min_t(u16, mem.config_page_sz,
1410 sizeof(Mpi2SasPhyPage0_t)));
1411
1412 if (mem.config_page_sz > ioc->config_page_sz)
1413 _config_free_config_dma_memory(ioc, &mem);
1414
1415 out:
1416 mutex_unlock(&ioc->config_cmds.mutex);
1417 return r;
1418}
1419
1420/**
1421 * mpt2sas_config_get_phy_pg1 - obtain phy page 1
1422 * @ioc: per adapter object
1423 * @mpi_reply: reply mf payload returned from firmware
1424 * @config_page: contents of the config page
1425 * @phy_number: phy number
1426 * Context: sleep.
1427 *
1428 * Returns 0 for success, non-zero for failure.
1429 */
1430int
1431mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1432 *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number)
1433{
1434 Mpi2ConfigRequest_t mpi_request;
1435 int r;
1436 struct config_request mem;
1437
1438 mutex_lock(&ioc->config_cmds.mutex);
1439 memset(config_page, 0, sizeof(Mpi2SasPhyPage1_t));
1440 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1441 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1442 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1443 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
1444 mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY;
1445 mpi_request.Header.PageNumber = 1;
1446 mpi_request.Header.PageVersion = MPI2_SASPHY1_PAGEVERSION;
1447 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1448 r = _config_request(ioc, &mpi_request, mpi_reply,
1449 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1450 if (r)
1451 goto out;
1452
1453 mpi_request.PageAddress =
1454 cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
1455 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1456 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1457 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1458 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1459 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
1460 mpi_request.ExtPageType = mpi_reply->ExtPageType;
1461 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
1462 if (mem.config_page_sz > ioc->config_page_sz) {
1463 r = _config_alloc_config_dma_memory(ioc, &mem);
1464 if (r)
1465 goto out;
1466 } else {
1467 mem.config_page_dma = ioc->config_page_dma;
1468 mem.config_page = ioc->config_page;
1469 }
1470 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1471 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1472 mem.config_page_dma);
1473 r = _config_request(ioc, &mpi_request, mpi_reply,
1474 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1475 if (!r)
1476 memcpy(config_page, mem.config_page,
1477 min_t(u16, mem.config_page_sz,
1478 sizeof(Mpi2SasPhyPage1_t)));
1479
1480 if (mem.config_page_sz > ioc->config_page_sz)
1481 _config_free_config_dma_memory(ioc, &mem);
1482
1483 out:
1484 mutex_unlock(&ioc->config_cmds.mutex);
1485 return r;
1486}
1487
1488/**
1489 * mpt2sas_config_get_raid_volume_pg1 - obtain raid volume page 1
1490 * @ioc: per adapter object
1491 * @mpi_reply: reply mf payload returned from firmware
1492 * @config_page: contents of the config page
1493 * @form: GET_NEXT_HANDLE or HANDLE
1494 * @handle: volume handle
1495 * Context: sleep.
1496 *
1497 * Returns 0 for success, non-zero for failure.
1498 */
1499int
1500mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc,
1501 Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form,
1502 u32 handle)
1503{
1504 Mpi2ConfigRequest_t mpi_request;
1505 int r;
1506 struct config_request mem;
1507
1508 mutex_lock(&ioc->config_cmds.mutex);
1509 memset(config_page, 0, sizeof(Mpi2RaidVolPage1_t));
1510 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1511 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1512 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1513 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
1514 mpi_request.Header.PageNumber = 1;
1515 mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION;
1516 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1517 r = _config_request(ioc, &mpi_request, mpi_reply,
1518 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1519 if (r)
1520 goto out;
1521
1522 mpi_request.PageAddress = cpu_to_le32(form | handle);
1523 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1524 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1525 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1526 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1527 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
1528 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
1529 if (mem.config_page_sz > ioc->config_page_sz) {
1530 r = _config_alloc_config_dma_memory(ioc, &mem);
1531 if (r)
1532 goto out;
1533 } else {
1534 mem.config_page_dma = ioc->config_page_dma;
1535 mem.config_page = ioc->config_page;
1536 }
1537 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1538 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1539 mem.config_page_dma);
1540 r = _config_request(ioc, &mpi_request, mpi_reply,
1541 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1542 if (!r)
1543 memcpy(config_page, mem.config_page,
1544 min_t(u16, mem.config_page_sz,
1545 sizeof(Mpi2RaidVolPage1_t)));
1546
1547 if (mem.config_page_sz > ioc->config_page_sz)
1548 _config_free_config_dma_memory(ioc, &mem);
1549
1550 out:
1551 mutex_unlock(&ioc->config_cmds.mutex);
1552 return r;
1553}
1554
1555/**
1556 * mpt2sas_config_get_number_pds - obtain number of phys disk assigned to volume
1557 * @ioc: per adapter object
1558 * @handle: volume handle
1559 * @num_pds: returns pds count
1560 * Context: sleep.
1561 *
1562 * Returns 0 for success, non-zero for failure.
1563 */
1564int
1565mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle,
1566 u8 *num_pds)
1567{
1568 Mpi2ConfigRequest_t mpi_request;
1569 Mpi2RaidVolPage0_t *config_page;
1570 Mpi2ConfigReply_t mpi_reply;
1571 int r;
1572 struct config_request mem;
1573 u16 ioc_status;
1574
1575 mutex_lock(&ioc->config_cmds.mutex);
1576 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1577 *num_pds = 0;
1578 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1579 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1580 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
1581 mpi_request.Header.PageNumber = 0;
1582 mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
1583 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1584 r = _config_request(ioc, &mpi_request, &mpi_reply,
1585 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1586 if (r)
1587 goto out;
1588
1589 mpi_request.PageAddress =
1590 cpu_to_le32(MPI2_RAID_VOLUME_PGAD_FORM_HANDLE | handle);
1591 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1592 mpi_request.Header.PageVersion = mpi_reply.Header.PageVersion;
1593 mpi_request.Header.PageNumber = mpi_reply.Header.PageNumber;
1594 mpi_request.Header.PageType = mpi_reply.Header.PageType;
1595 mpi_request.Header.PageLength = mpi_reply.Header.PageLength;
1596 mem.config_page_sz = le16_to_cpu(mpi_reply.Header.PageLength) * 4;
1597 if (mem.config_page_sz > ioc->config_page_sz) {
1598 r = _config_alloc_config_dma_memory(ioc, &mem);
1599 if (r)
1600 goto out;
1601 } else {
1602 mem.config_page_dma = ioc->config_page_dma;
1603 mem.config_page = ioc->config_page;
1604 }
1605 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1606 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1607 mem.config_page_dma);
1608 r = _config_request(ioc, &mpi_request, &mpi_reply,
1609 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1610 if (!r) {
1611 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1612 MPI2_IOCSTATUS_MASK;
1613 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1614 config_page = mem.config_page;
1615 *num_pds = config_page->NumPhysDisks;
1616 }
1617 }
1618
1619 if (mem.config_page_sz > ioc->config_page_sz)
1620 _config_free_config_dma_memory(ioc, &mem);
1621
1622 out:
1623 mutex_unlock(&ioc->config_cmds.mutex);
1624 return r;
1625}
1626
1627/**
1628 * mpt2sas_config_get_raid_volume_pg0 - obtain raid volume page 0
1629 * @ioc: per adapter object
1630 * @mpi_reply: reply mf payload returned from firmware
1631 * @config_page: contents of the config page
1632 * @form: GET_NEXT_HANDLE or HANDLE
1633 * @handle: volume handle
1634 * @sz: size of buffer passed in config_page
1635 * Context: sleep.
1636 *
1637 * Returns 0 for success, non-zero for failure.
1638 */
1639int
1640mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc,
1641 Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form,
1642 u32 handle, u16 sz)
1643{
1644 Mpi2ConfigRequest_t mpi_request;
1645 int r;
1646 struct config_request mem;
1647
1648 mutex_lock(&ioc->config_cmds.mutex);
1649 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1650 memset(config_page, 0, sz);
1651 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1652 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1653 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
1654 mpi_request.Header.PageNumber = 0;
1655 mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
1656 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1657 r = _config_request(ioc, &mpi_request, mpi_reply,
1658 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1659 if (r)
1660 goto out;
1661
1662 mpi_request.PageAddress = cpu_to_le32(form | handle);
1663 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1664 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1665 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1666 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1667 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
1668 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
1669 if (mem.config_page_sz > ioc->config_page_sz) {
1670 r = _config_alloc_config_dma_memory(ioc, &mem);
1671 if (r)
1672 goto out;
1673 } else {
1674 mem.config_page_dma = ioc->config_page_dma;
1675 mem.config_page = ioc->config_page;
1676 }
1677 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1678 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1679 mem.config_page_dma);
1680 r = _config_request(ioc, &mpi_request, mpi_reply,
1681 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1682 if (!r)
1683 memcpy(config_page, mem.config_page,
1684 min_t(u16, sz, mem.config_page_sz));
1685
1686 if (mem.config_page_sz > ioc->config_page_sz)
1687 _config_free_config_dma_memory(ioc, &mem);
1688
1689 out:
1690 mutex_unlock(&ioc->config_cmds.mutex);
1691 return r;
1692}
1693
1694/**
1695 * mpt2sas_config_get_phys_disk_pg0 - obtain phys disk page 0
1696 * @ioc: per adapter object
1697 * @mpi_reply: reply mf payload returned from firmware
1698 * @config_page: contents of the config page
1699 * @form: GET_NEXT_PHYSDISKNUM, PHYSDISKNUM, DEVHANDLE
1700 * @form_specific: specific to the form
1701 * Context: sleep.
1702 *
1703 * Returns 0 for success, non-zero for failure.
1704 */
1705int
1706mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1707 *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, u32 form,
1708 u32 form_specific)
1709{
1710 Mpi2ConfigRequest_t mpi_request;
1711 int r;
1712 struct config_request mem;
1713
1714 mutex_lock(&ioc->config_cmds.mutex);
1715 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1716 memset(config_page, 0, sizeof(Mpi2RaidPhysDiskPage0_t));
1717 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1718 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1719 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK;
1720 mpi_request.Header.PageNumber = 0;
1721 mpi_request.Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION;
1722 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1723 r = _config_request(ioc, &mpi_request, mpi_reply,
1724 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1725 if (r)
1726 goto out;
1727
1728 mpi_request.PageAddress = cpu_to_le32(form | form_specific);
1729 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1730 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1731 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1732 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1733 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
1734 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
1735 if (mem.config_page_sz > ioc->config_page_sz) {
1736 r = _config_alloc_config_dma_memory(ioc, &mem);
1737 if (r)
1738 goto out;
1739 } else {
1740 mem.config_page_dma = ioc->config_page_dma;
1741 mem.config_page = ioc->config_page;
1742 }
1743 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1744 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1745 mem.config_page_dma);
1746 r = _config_request(ioc, &mpi_request, mpi_reply,
1747 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1748 if (!r)
1749 memcpy(config_page, mem.config_page,
1750 min_t(u16, mem.config_page_sz,
1751 sizeof(Mpi2RaidPhysDiskPage0_t)));
1752
1753 if (mem.config_page_sz > ioc->config_page_sz)
1754 _config_free_config_dma_memory(ioc, &mem);
1755
1756 out:
1757 mutex_unlock(&ioc->config_cmds.mutex);
1758 return r;
1759}
1760
1761/**
1762 * mpt2sas_config_get_volume_handle - returns volume handle for give hidden raid components
1763 * @ioc: per adapter object
1764 * @pd_handle: phys disk handle
1765 * @volume_handle: volume handle
1766 * Context: sleep.
1767 *
1768 * Returns 0 for success, non-zero for failure.
1769 */
1770int
1771mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
1772 u16 *volume_handle)
1773{
1774 Mpi2RaidConfigurationPage0_t *config_page;
1775 Mpi2ConfigRequest_t mpi_request;
1776 Mpi2ConfigReply_t mpi_reply;
1777 int r, i;
1778 struct config_request mem;
1779 u16 ioc_status;
1780
1781 mutex_lock(&ioc->config_cmds.mutex);
1782 *volume_handle = 0;
1783 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1784 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1785 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1786 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
1787 mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG;
1788 mpi_request.Header.PageVersion = MPI2_RAIDCONFIG0_PAGEVERSION;
1789 mpi_request.Header.PageNumber = 0;
1790 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1791 r = _config_request(ioc, &mpi_request, &mpi_reply,
1792 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1793 if (r)
1794 goto out;
1795
1796 mpi_request.PageAddress =
1797 cpu_to_le32(MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG);
1798 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1799 mpi_request.Header.PageVersion = mpi_reply.Header.PageVersion;
1800 mpi_request.Header.PageNumber = mpi_reply.Header.PageNumber;
1801 mpi_request.Header.PageType = mpi_reply.Header.PageType;
1802 mpi_request.ExtPageLength = mpi_reply.ExtPageLength;
1803 mpi_request.ExtPageType = mpi_reply.ExtPageType;
1804 mem.config_page_sz = le16_to_cpu(mpi_reply.ExtPageLength) * 4;
1805 if (mem.config_page_sz > ioc->config_page_sz) {
1806 r = _config_alloc_config_dma_memory(ioc, &mem);
1807 if (r)
1808 goto out;
1809 } else {
1810 mem.config_page_dma = ioc->config_page_dma;
1811 mem.config_page = ioc->config_page;
1812 }
1813 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1814 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1815 mem.config_page_dma);
1816 r = _config_request(ioc, &mpi_request, &mpi_reply,
1817 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1818 if (r)
1819 goto out;
1820
1821 r = -1;
1822 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
1823 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
1824 goto done;
1825 config_page = mem.config_page;
1826 for (i = 0; i < config_page->NumElements; i++) {
1827 if ((config_page->ConfigElement[i].ElementFlags &
1828 MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE) !=
1829 MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT)
1830 continue;
1831 if (config_page->ConfigElement[i].PhysDiskDevHandle ==
1832 pd_handle) {
1833 *volume_handle = le16_to_cpu(config_page->
1834 ConfigElement[i].VolDevHandle);
1835 r = 0;
1836 goto done;
1837 }
1838 }
1839
1840 done:
1841 if (mem.config_page_sz > ioc->config_page_sz)
1842 _config_free_config_dma_memory(ioc, &mem);
1843
1844 out:
1845 mutex_unlock(&ioc->config_cmds.mutex);
1846 return r;
1847}
1848
1849/**
1850 * mpt2sas_config_get_volume_wwid - returns wwid given the volume handle
1851 * @ioc: per adapter object
1852 * @volume_handle: volume handle
1853 * @wwid: volume wwid
1854 * Context: sleep.
1855 *
1856 * Returns 0 for success, non-zero for failure.
1857 */
1858int
1859mpt2sas_config_get_volume_wwid(struct MPT2SAS_ADAPTER *ioc, u16 volume_handle,
1860 u64 *wwid)
1861{
1862 Mpi2ConfigReply_t mpi_reply;
1863 Mpi2RaidVolPage1_t raid_vol_pg1;
1864
1865 *wwid = 0;
1866 if (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
1867 &raid_vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE,
1868 volume_handle))) {
1869 *wwid = le64_to_cpu(raid_vol_pg1.WWID);
1870 return 0;
1871 } else
1872 return -1;
1873}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
new file mode 100644
index 000000000000..2d4f85c9d7a1
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -0,0 +1,2516 @@
1/*
2 * Management Module Support for MPT (Message Passing Technology) based
3 * controllers
4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
6 * Copyright (C) 2007-2008 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
42 * USA.
43 */
44
45#include <linux/version.h>
46#include <linux/kernel.h>
47#include <linux/module.h>
48#include <linux/errno.h>
49#include <linux/init.h>
50#include <linux/slab.h>
51#include <linux/types.h>
52#include <linux/pci.h>
53#include <linux/delay.h>
54#include <linux/smp_lock.h>
55#include <linux/compat.h>
56#include <linux/poll.h>
57
58#include <linux/io.h>
59#include <linux/uaccess.h>
60
61#include "mpt2sas_base.h"
62#include "mpt2sas_ctl.h"
63
64static struct fasync_struct *async_queue;
65static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);
66
/**
 * enum block_state - blocking state
 * @NON_BLOCKING: non blocking
 * @BLOCKING: blocking
 *
 * These states are for ioctls that need to wait for a response
 * from firmware, so they probably require sleep.
 * NON_BLOCKING callers get -EAGAIN instead of sleeping when the
 * control command mutex is already held.
 */
enum block_state {
	NON_BLOCKING,
	BLOCKING,
};
79
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
/**
 * _ctl_display_some_debug - debug routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @calling_function_name: string pass from calling function
 * @mpi_reply: reply message frame
 * Context: none.
 *
 * Function for displaying debug info helpfull when debugging issues
 * in this module.
 *
 * Decodes the request frame identified by @smid into a human readable
 * description, then (when @mpi_reply is non-NULL) also prints any
 * non-zero IOC status/loginfo and, for SCSI IO functions, the SCSI
 * state/status from the reply.
 */
static void
_ctl_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid,
    char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
{
	Mpi2ConfigRequest_t *mpi_request;
	char *desc = NULL;

	/* only emit output when ioctl debugging is enabled */
	if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
		return;

	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
	switch (mpi_request->Function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		/* format into the shared per-adapter scratch string */
		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "scsi_io, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		desc = "task_mgmt";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		desc = "ioc_init";
		break;
	case MPI2_FUNCTION_IOC_FACTS:
		desc = "ioc_facts";
		break;
	case MPI2_FUNCTION_CONFIG:
	{
		Mpi2ConfigRequest_t *config_request =
		    (Mpi2ConfigRequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "config, type(0x%02x), ext_type(0x%02x), number(%d)",
		    (config_request->Header.PageType &
		    MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
		    config_request->Header.PageNumber);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_PORT_FACTS:
		desc = "port_facts";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		desc = "port_enable";
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		desc = "event_notification";
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		desc = "fw_download";
		break;
	case MPI2_FUNCTION_FW_UPLOAD:
		desc = "fw_upload";
		break;
	case MPI2_FUNCTION_RAID_ACTION:
		desc = "raid_action";
		break;
	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "raid_pass, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		desc = "sas_iounit_cntl";
		break;
	case MPI2_FUNCTION_SATA_PASSTHROUGH:
		desc = "sata_pass";
		break;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		desc = "diag_buffer_post";
		break;
	case MPI2_FUNCTION_DIAG_RELEASE:
		desc = "diag_release";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		desc = "smp_passthrough";
		break;
	}

	/* unrecognized function: nothing useful to print */
	if (!desc)
		return;

	printk(MPT2SAS_DEBUG_FMT "%s: %s, smid(%d)\n",
	    ioc->name, calling_function_name, desc, smid);

	if (!mpi_reply)
		return;

	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
		printk(MPT2SAS_DEBUG_FMT
		    "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
		    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
		    le32_to_cpu(mpi_reply->IOCLogInfo));

	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    mpi_request->Function ==
	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
		Mpi2SCSIIOReply_t *scsi_reply =
		    (Mpi2SCSIIOReply_t *)mpi_reply;
		if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
			printk(MPT2SAS_DEBUG_FMT
			    "\tscsi_state(0x%02x), scsi_status"
			    "(0x%02x)\n", ioc->name,
			    scsi_reply->SCSIState,
			    scsi_reply->SCSIStatus);
	}
}
#endif
214
/**
 * mpt2sas_ctl_done - ctl module completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @VF_ID: virtual function id
 * @reply: reply message frame(lower 32bit addr)
 * Context: none.
 *
 * The callback handler when using ioc->ctl_cb_idx.
 *
 * Copies the reply frame (when one was posted) into ctl_cmds.reply,
 * marks the command complete and wakes the waiter sleeping on
 * ctl_cmds.done.
 *
 * Return nothing.
 */
void
mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	/* ignore completions when no ctl command is outstanding */
	if (ioc->ctl_cmds.status == MPT2_CMD_NOT_USED)
		return;
	/* ignore completions belonging to a different request */
	if (ioc->ctl_cmds.smid != smid)
		return;
	ioc->ctl_cmds.status |= MPT2_CMD_COMPLETE;
	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		/* MsgLength is in 4-byte words */
		memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->ctl_cmds.status |= MPT2_CMD_REPLY_VALID;
	}
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	_ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
#endif
	ioc->ctl_cmds.status &= ~MPT2_CMD_PENDING;
	complete(&ioc->ctl_cmds.done);
}
248
249/**
250 * _ctl_check_event_type - determines when an event needs logging
251 * @ioc: per adapter object
252 * @event: firmware event
253 *
254 * The bitmask in ioc->event_type[] indicates which events should be
255 * be saved in the driver event_log. This bitmask is set by application.
256 *
257 * Returns 1 when event should be captured, or zero means no match.
258 */
259static int
260_ctl_check_event_type(struct MPT2SAS_ADAPTER *ioc, u16 event)
261{
262 u16 i;
263 u32 desired_event;
264
265 if (event >= 128 || !event || !ioc->event_log)
266 return 0;
267
268 desired_event = (1 << (event % 32));
269 if (!desired_event)
270 desired_event = 1;
271 i = event / 32;
272 return desired_event & ioc->event_type[i];
273}
274
/**
 * mpt2sas_ctl_add_to_event_log - add event
 * @ioc: per adapter object
 * @mpi_reply: reply message frame
 *
 * Inserts the firmware event into the driver's circular event log
 * (when the application-selected event mask matches) and notifies
 * any process waiting via poll/fasync.
 *
 * Return nothing.
 */
void
mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
    Mpi2EventNotificationReply_t *mpi_reply)
{
	struct MPT2_IOCTL_EVENTS *event_log;
	u16 event;
	int i;
	u32 sz, event_data_sz;
	u8 send_aen = 0;

	/* log not allocated: the application never enabled event capture */
	if (!ioc->event_log)
		return;

	event = le16_to_cpu(mpi_reply->Event);

	if (_ctl_check_event_type(ioc, event)) {

		/* insert entry into circular event_log */
		i = ioc->event_context % MPT2SAS_CTL_EVENT_LOG_SIZE;
		event_log = ioc->event_log;
		event_log[i].event = event;
		event_log[i].context = ioc->event_context++;

		/* EventDataLength is in 4-byte words; clamp the copy to
		 * the fixed per-entry data size
		 */
		event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
		sz = min_t(u32, event_data_sz, MPT2_EVENT_DATA_SIZE);
		memset(event_log[i].data, 0, MPT2_EVENT_DATA_SIZE);
		memcpy(event_log[i].data, mpi_reply->EventData, sz);
		send_aen = 1;
	}

	/* This aen_event_read_flag flag is set until the
	 * application has read the event log.
	 * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
	 */
	if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
	    (send_aen && !ioc->aen_event_read_flag)) {
		ioc->aen_event_read_flag = 1;
		wake_up_interruptible(&ctl_poll_wait);
		if (async_queue)
			kill_fasync(&async_queue, SIGIO, POLL_IN);
	}
}
324
325/**
326 * mpt2sas_ctl_event_callback - firmware event handler (called at ISR time)
327 * @ioc: per adapter object
328 * @VF_ID: virtual function id
329 * @reply: reply message frame(lower 32bit addr)
330 * Context: interrupt.
331 *
332 * This function merely adds a new work task into ioc->firmware_event_thread.
333 * The tasks are worked from _firmware_event_work in user context.
334 *
335 * Return nothing.
336 */
337void
338mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 reply)
339{
340 Mpi2EventNotificationReply_t *mpi_reply;
341
342 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
343 mpt2sas_ctl_add_to_event_log(ioc, mpi_reply);
344}
345
346/**
347 * _ctl_verify_adapter - validates ioc_number passed from application
348 * @ioc: per adapter object
349 * @iocpp: The ioc pointer is returned in this.
350 *
351 * Return (-1) means error, else ioc_number.
352 */
353static int
354_ctl_verify_adapter(int ioc_number, struct MPT2SAS_ADAPTER **iocpp)
355{
356 struct MPT2SAS_ADAPTER *ioc;
357
358 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
359 if (ioc->id != ioc_number)
360 continue;
361 *iocpp = ioc;
362 return ioc_number;
363 }
364 *iocpp = NULL;
365 return -1;
366}
367
/**
 * mpt2sas_ctl_reset_handler - reset callback handler (for ctl)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
 * MPT2_IOC_DONE_RESET
 *
 * During MPT2_IOC_AFTER_RESET any pending ctl command is marked as
 * terminated by reset and its waiter is woken so it does not hang.
 */
void
mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
{
	switch (reset_phase) {
	case MPT2_IOC_PRE_RESET:
		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
		    "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
		break;
	case MPT2_IOC_AFTER_RESET:
		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
		    "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
		/* abort any outstanding ctl command: free its smid and
		 * complete it so the sleeping issuer returns
		 */
		if (ioc->ctl_cmds.status & MPT2_CMD_PENDING) {
			ioc->ctl_cmds.status |= MPT2_CMD_RESET;
			mpt2sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
			complete(&ioc->ctl_cmds.done);
		}
		break;
	case MPT2_IOC_DONE_RESET:
		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
		break;
	}
}
401
/**
 * _ctl_fasync - fasync file op for the ctl character device
 * @fd: file descriptor
 * @filep: file pointer
 * @mode: enable/disable async notification
 *
 * Called when application request fasyn callback handler; registers
 * the caller on the module-wide async_queue used by kill_fasync().
 */
static int
_ctl_fasync(int fd, struct file *filep, int mode)
{
	return fasync_helper(fd, filep, mode, &async_queue);
}
415
/**
 * _ctl_release - release file op for the ctl character device
 * @inode: inode of the device node
 * @filep: file pointer
 *
 * Called when application releases the fasyn callback handler;
 * removes the caller from async_queue (fd == -1 means remove).
 */
static int
_ctl_release(struct inode *inode, struct file *filep)
{
	return fasync_helper(-1, filep, 0, &async_queue);
}
428
429/**
430 * _ctl_poll -
431 * @file -
432 * @wait -
433 *
434 */
435static unsigned int
436_ctl_poll(struct file *filep, poll_table *wait)
437{
438 struct MPT2SAS_ADAPTER *ioc;
439
440 poll_wait(filep, &ctl_poll_wait, wait);
441
442 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
443 if (ioc->aen_event_read_flag)
444 return POLLIN | POLLRDNORM;
445 }
446 return 0;
447}
448
449/**
450 * _ctl_do_task_abort - assign an active smid to the abort_task
451 * @ioc: per adapter object
452 * @karg - (struct mpt2_ioctl_command)
453 * @tm_request - pointer to mf from user space
454 *
455 * Returns 0 when an smid if found, else fail.
456 * during failure, the reply frame is filled.
457 */
458static int
459_ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
460 Mpi2SCSITaskManagementRequest_t *tm_request)
461{
462 u8 found = 0;
463 u16 i;
464 u16 handle;
465 struct scsi_cmnd *scmd;
466 struct MPT2SAS_DEVICE *priv_data;
467 unsigned long flags;
468 Mpi2SCSITaskManagementReply_t *tm_reply;
469 u32 sz;
470 u32 lun;
471
472 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
473
474 handle = le16_to_cpu(tm_request->DevHandle);
475 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
476 for (i = ioc->request_depth; i && !found; i--) {
477 scmd = ioc->scsi_lookup[i - 1].scmd;
478 if (scmd == NULL || scmd->device == NULL ||
479 scmd->device->hostdata == NULL)
480 continue;
481 if (lun != scmd->device->lun)
482 continue;
483 priv_data = scmd->device->hostdata;
484 if (priv_data->sas_target == NULL)
485 continue;
486 if (priv_data->sas_target->handle != handle)
487 continue;
488 tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
489 found = 1;
490 }
491 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
492
493 if (!found) {
494 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: "
495 "DevHandle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
496 tm_request->DevHandle, lun));
497 tm_reply = ioc->ctl_cmds.reply;
498 tm_reply->DevHandle = tm_request->DevHandle;
499 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
500 tm_reply->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
501 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
502 tm_reply->VP_ID = tm_request->VP_ID;
503 tm_reply->VF_ID = tm_request->VF_ID;
504 sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
505 if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
506 sz))
507 printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
508 __LINE__, __func__);
509 return 1;
510 }
511
512 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: "
513 "DevHandle(0x%04x), lun(%d), smid(%d)\n", ioc->name,
514 tm_request->DevHandle, lun, tm_request->TaskMID));
515 return 0;
516}
517
/**
 * _ctl_do_mpt_command - main handler for MPT2COMMAND opcode
 * @ioc: per adapter object
 * @karg - (struct mpt2_ioctl_command)
 * @mf - pointer to mf in user space
 * @state - NON_BLOCKING or BLOCKING
 *
 * Generic passthrough: copies an arbitrary MPI request frame from user
 * space, attaches optional data-out (write) and data-in (read) DMA
 * buffers, posts it to the firmware, waits for completion, then copies
 * the data-in payload, reply frame and (for SCSI IO) sense data back to
 * user space.  Serialized by ioc->ctl_cmds.mutex.
 */
static long
_ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
    struct mpt2_ioctl_command karg, void __user *mf, enum block_state state)
{
	MPI2RequestHeader_t *mpi_request;
	MPI2DefaultReply_t *mpi_reply;
	u32 ioc_state;
	u16 ioc_status;
	u16 smid;
	unsigned long timeout, timeleft;
	u8 issue_reset;
	u32 sz;
	void *psge;
	void *priv_sense = NULL;
	void *data_out = NULL;
	dma_addr_t data_out_dma;
	size_t data_out_sz = 0;
	void *data_in = NULL;
	dma_addr_t data_in_dma;
	size_t data_in_sz = 0;
	u32 sgl_flags;
	long ret;
	u16 wait_state_count;

	issue_reset = 0;

	/* one passthrough at a time; NON_BLOCKING callers bail out instead
	 * of sleeping on the mutex */
	if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
		return -EAGAIN;
	else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
		return -ERESTARTSYS;

	if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
		printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
		    ioc->name, __func__);
		ret = -EAGAIN;
		goto out;
	}

	/* wait up to ~10s for the IOC to reach OPERATIONAL */
	wait_state_count = 0;
	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == 10) {
			printk(MPT2SAS_ERR_FMT
			    "%s: failed due to ioc not operational\n",
			    ioc->name, __func__);
			ret = -EFAULT;
			goto out;
		}
		ssleep(1);
		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
		printk(MPT2SAS_INFO_FMT "%s: waiting for "
		    "operational state(count=%d)\n", ioc->name,
		    __func__, wait_state_count);
	}
	if (wait_state_count)
		printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
		    ioc->name, __func__);

	smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
	if (!smid) {
		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		ret = -EAGAIN;
		goto out;
	}

	ret = 0;
	ioc->ctl_cmds.status = MPT2_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
	ioc->ctl_cmds.smid = smid;
	data_out_sz = karg.data_out_size;
	data_in_sz = karg.data_in_size;

	/* copy in request message frame from user */
	if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__,
		    __func__);
		ret = -EFAULT;
		mpt2sas_base_free_smid(ioc, smid);
		goto out;
	}

	/* SCSI IO passthrough must address a valid device handle */
	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
		if (!mpi_request->FunctionDependent1 ||
		    mpi_request->FunctionDependent1 >
		    cpu_to_le16(ioc->facts.MaxDevHandle)) {
			ret = -EINVAL;
			mpt2sas_base_free_smid(ioc, smid);
			goto out;
		}
	}

	/* obtain dma-able memory for data transfer */
	if (data_out_sz) /* WRITE */ {
		data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
		    &data_out_dma);
		if (!data_out) {
			printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENOMEM;
			mpt2sas_base_free_smid(ioc, smid);
			goto out;
		}
		if (copy_from_user(data_out, karg.data_out_buf_ptr,
			data_out_sz)) {
			printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -EFAULT;
			mpt2sas_base_free_smid(ioc, smid);
			goto out;
		}
	}

	if (data_in_sz) /* READ */ {
		data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
		    &data_in_dma);
		if (!data_in) {
			printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENOMEM;
			mpt2sas_base_free_smid(ioc, smid);
			goto out;
		}
	}

	/* add scatter gather elements */
	psge = (void *)mpi_request + (karg.data_sge_offset*4);

	if (!data_out_sz && !data_in_sz) {
		mpt2sas_base_build_zero_len_sge(ioc, psge);
	} else if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size;

		/* READ sgel last */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);
	} else if (data_in_sz) /* READ */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	}

	/* send command to firmware */
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	_ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
#endif

	switch (mpi_request->Function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
	{
		Mpi2SCSIIORequest_t *scsiio_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;
		/* point the firmware at the per-smid sense buffer */
		scsiio_request->SenseBufferLowAddress =
		    (u32)mpt2sas_base_get_sense_buffer_dma(ioc, smid);
		priv_sense = mpt2sas_base_get_sense_buffer(ioc, smid);
		memset(priv_sense, 0, SCSI_SENSE_BUFFERSIZE);
		mpt2sas_base_put_smid_scsi_io(ioc, smid, 0,
		    le16_to_cpu(mpi_request->FunctionDependent1));
		break;
	}
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
	{
		Mpi2SCSITaskManagementRequest_t *tm_request =
		    (Mpi2SCSITaskManagementRequest_t *)mpi_request;

		/* NOTE(review): when the abort helper short-circuits (no
		 * active mid) we goto out without freeing the smid — confirm
		 * this is intended */
		if (tm_request->TaskType ==
		    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
			if (_ctl_do_task_abort(ioc, &karg, tm_request))
				goto out;
		}

		/* tm_cmds.mutex is released after completion, below */
		mutex_lock(&ioc->tm_cmds.mutex);
		mpt2sas_scsih_set_tm_flag(ioc, le16_to_cpu(
		    tm_request->DevHandle));
		mpt2sas_base_put_smid_hi_priority(ioc, smid,
		    mpi_request->VF_ID);
		break;
	}
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
	{
		Mpi2SmpPassthroughRequest_t *smp_request =
		    (Mpi2SmpPassthroughRequest_t *)mpi_request;
		u8 *data;

		/* ioc determines which port to use */
		smp_request->PhysicalPort = 0xFF;
		if (smp_request->PassthroughFlags &
		    MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
			data = (u8 *)&smp_request->SGL;
		else
			data = data_out;

		/* NOTE(review): if the request is not IMMEDIATE and the
		 * caller supplied no data-out buffer, data is NULL here and
		 * the dereference below would oops — verify callers always
		 * provide a payload */
		if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
			/* SMP PHY CONTROL with link/hard reset: suppress the
			 * resulting loginfo noise */
			ioc->ioc_link_reset_in_progress = 1;
			ioc->ignore_loginfos = 1;
		}
		mpt2sas_base_put_smid_default(ioc, smid, mpi_request->VF_ID);
		break;
	}
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
	{
		Mpi2SasIoUnitControlRequest_t *sasiounit_request =
		    (Mpi2SasIoUnitControlRequest_t *)mpi_request;

		if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
		    || sasiounit_request->Operation ==
		    MPI2_SAS_OP_PHY_LINK_RESET) {
			ioc->ioc_link_reset_in_progress = 1;
			ioc->ignore_loginfos = 1;
		}
		mpt2sas_base_put_smid_default(ioc, smid, mpi_request->VF_ID);
		break;
	}
	default:
		mpt2sas_base_put_smid_default(ioc, smid, mpi_request->VF_ID);
		break;
	}

	/* NOTE(review): karg.timeout comes from user space; timeout*HZ is an
	 * unchecked multiply — confirm range is validated by the caller */
	if (karg.timeout < MPT2_IOCTL_DEFAULT_TIMEOUT)
		timeout = MPT2_IOCTL_DEFAULT_TIMEOUT;
	else
		timeout = karg.timeout;
	timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    timeout*HZ);
	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		Mpi2SCSITaskManagementRequest_t *tm_request =
		    (Mpi2SCSITaskManagementRequest_t *)mpi_request;
		/* undo the TM bookkeeping taken before posting */
		mutex_unlock(&ioc->tm_cmds.mutex);
		mpt2sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
		    tm_request->DevHandle));
	} else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
	    mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
	    ioc->ioc_link_reset_in_progress) {
		ioc->ioc_link_reset_in_progress = 0;
		ioc->ignore_loginfos = 0;
	}
	if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
		printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
		    __func__);
		_debug_dump_mf(mpi_request, karg.data_sge_offset);
		/* don't pile a reset on top of one already in flight */
		if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
			issue_reset = 1;
		goto issue_host_reset;
	}

	mpi_reply = ioc->ctl_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;

#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
	    (ioc->logging_level & MPT_DEBUG_TM)) {
		Mpi2SCSITaskManagementReply_t *tm_reply =
		    (Mpi2SCSITaskManagementReply_t *)mpi_reply;

		printk(MPT2SAS_DEBUG_FMT "TASK_MGMT: "
		    "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
		    "TerminationCount(0x%08x)\n", ioc->name,
		    tm_reply->IOCStatus, tm_reply->IOCLogInfo,
		    tm_reply->TerminationCount);
	}
#endif
	/* copy out xdata to user */
	if (data_in_sz) {
		if (copy_to_user(karg.data_in_buf_ptr, data_in,
		    data_in_sz)) {
			printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

	/* copy out reply message frame to user */
	if (karg.max_reply_bytes) {
		sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
		if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
		    sz)) {
			printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

	/* copy out sense to user */
	if (karg.max_sense_bytes && (mpi_request->Function ==
	    MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
		sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE);
		if (copy_to_user(karg.sense_data_ptr, priv_sense, sz)) {
			printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

 issue_host_reset:
	if (issue_reset) {
		/* for SCSI IO timeouts a target reset is enough; everything
		 * else gets a full host reset */
		if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
		    mpi_request->Function ==
		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
			printk(MPT2SAS_INFO_FMT "issue target reset: handle "
			    "= (0x%04x)\n", ioc->name,
			    mpi_request->FunctionDependent1);
			mutex_lock(&ioc->tm_cmds.mutex);
			mpt2sas_scsih_issue_tm(ioc,
			    mpi_request->FunctionDependent1, 0,
			    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10);
			ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
			mutex_unlock(&ioc->tm_cmds.mutex);
		} else
			mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
			    FORCE_BIG_HAMMER);
	}

 out:

	/* free memory associated with sg buffers */
	if (data_in)
		pci_free_consistent(ioc->pdev, data_in_sz, data_in,
		    data_in_dma);

	if (data_out)
		pci_free_consistent(ioc->pdev, data_out_sz, data_out,
		    data_out_dma);

	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
	mutex_unlock(&ioc->ctl_cmds.mutex);
	return ret;
}
881
882/**
883 * _ctl_getiocinfo - main handler for MPT2IOCINFO opcode
884 * @arg - user space buffer containing ioctl content
885 */
886static long
887_ctl_getiocinfo(void __user *arg)
888{
889 struct mpt2_ioctl_iocinfo karg;
890 struct MPT2SAS_ADAPTER *ioc;
891 u8 revision;
892
893 if (copy_from_user(&karg, arg, sizeof(karg))) {
894 printk(KERN_ERR "failure at %s:%d/%s()!\n",
895 __FILE__, __LINE__, __func__);
896 return -EFAULT;
897 }
898 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
899 return -ENODEV;
900
901 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
902 __func__));
903
904 memset(&karg, 0 , sizeof(karg));
905 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
906 if (ioc->pfacts)
907 karg.port_number = ioc->pfacts[0].PortNumber;
908 pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
909 karg.hw_rev = revision;
910 karg.pci_id = ioc->pdev->device;
911 karg.subsystem_device = ioc->pdev->subsystem_device;
912 karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
913 karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
914 karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
915 karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
916 karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
917 karg.firmware_version = ioc->facts.FWVersion.Word;
918 strncpy(karg.driver_version, MPT2SAS_DRIVER_VERSION,
919 MPT2_IOCTL_VERSION_LENGTH);
920 karg.driver_version[MPT2_IOCTL_VERSION_LENGTH - 1] = '\0';
921 karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
922
923 if (copy_to_user(arg, &karg, sizeof(karg))) {
924 printk(KERN_ERR "failure at %s:%d/%s()!\n",
925 __FILE__, __LINE__, __func__);
926 return -EFAULT;
927 }
928 return 0;
929}
930
931/**
932 * _ctl_eventquery - main handler for MPT2EVENTQUERY opcode
933 * @arg - user space buffer containing ioctl content
934 */
935static long
936_ctl_eventquery(void __user *arg)
937{
938 struct mpt2_ioctl_eventquery karg;
939 struct MPT2SAS_ADAPTER *ioc;
940
941 if (copy_from_user(&karg, arg, sizeof(karg))) {
942 printk(KERN_ERR "failure at %s:%d/%s()!\n",
943 __FILE__, __LINE__, __func__);
944 return -EFAULT;
945 }
946 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
947 return -ENODEV;
948
949 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
950 __func__));
951
952 karg.event_entries = MPT2SAS_CTL_EVENT_LOG_SIZE;
953 memcpy(karg.event_types, ioc->event_type,
954 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
955
956 if (copy_to_user(arg, &karg, sizeof(karg))) {
957 printk(KERN_ERR "failure at %s:%d/%s()!\n",
958 __FILE__, __LINE__, __func__);
959 return -EFAULT;
960 }
961 return 0;
962}
963
964/**
965 * _ctl_eventenable - main handler for MPT2EVENTENABLE opcode
966 * @arg - user space buffer containing ioctl content
967 */
968static long
969_ctl_eventenable(void __user *arg)
970{
971 struct mpt2_ioctl_eventenable karg;
972 struct MPT2SAS_ADAPTER *ioc;
973
974 if (copy_from_user(&karg, arg, sizeof(karg))) {
975 printk(KERN_ERR "failure at %s:%d/%s()!\n",
976 __FILE__, __LINE__, __func__);
977 return -EFAULT;
978 }
979 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
980 return -ENODEV;
981
982 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
983 __func__));
984
985 if (ioc->event_log)
986 return 0;
987 memcpy(ioc->event_type, karg.event_types,
988 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
989 mpt2sas_base_validate_event_type(ioc, ioc->event_type);
990
991 /* initialize event_log */
992 ioc->event_context = 0;
993 ioc->aen_event_read_flag = 0;
994 ioc->event_log = kcalloc(MPT2SAS_CTL_EVENT_LOG_SIZE,
995 sizeof(struct MPT2_IOCTL_EVENTS), GFP_KERNEL);
996 if (!ioc->event_log) {
997 printk(KERN_ERR "failure at %s:%d/%s()!\n",
998 __FILE__, __LINE__, __func__);
999 return -ENOMEM;
1000 }
1001 return 0;
1002}
1003
1004/**
1005 * _ctl_eventreport - main handler for MPT2EVENTREPORT opcode
1006 * @arg - user space buffer containing ioctl content
1007 */
1008static long
1009_ctl_eventreport(void __user *arg)
1010{
1011 struct mpt2_ioctl_eventreport karg;
1012 struct MPT2SAS_ADAPTER *ioc;
1013 u32 number_bytes, max_events, max;
1014 struct mpt2_ioctl_eventreport __user *uarg = arg;
1015
1016 if (copy_from_user(&karg, arg, sizeof(karg))) {
1017 printk(KERN_ERR "failure at %s:%d/%s()!\n",
1018 __FILE__, __LINE__, __func__);
1019 return -EFAULT;
1020 }
1021 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1022 return -ENODEV;
1023
1024 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
1025 __func__));
1026
1027 number_bytes = karg.hdr.max_data_size -
1028 sizeof(struct mpt2_ioctl_header);
1029 max_events = number_bytes/sizeof(struct MPT2_IOCTL_EVENTS);
1030 max = min_t(u32, MPT2SAS_CTL_EVENT_LOG_SIZE, max_events);
1031
1032 /* If fewer than 1 event is requested, there must have
1033 * been some type of error.
1034 */
1035 if (!max || !ioc->event_log)
1036 return -ENODATA;
1037
1038 number_bytes = max * sizeof(struct MPT2_IOCTL_EVENTS);
1039 if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
1040 printk(KERN_ERR "failure at %s:%d/%s()!\n",
1041 __FILE__, __LINE__, __func__);
1042 return -EFAULT;
1043 }
1044
1045 /* reset flag so SIGIO can restart */
1046 ioc->aen_event_read_flag = 0;
1047 return 0;
1048}
1049
1050/**
1051 * _ctl_do_reset - main handler for MPT2HARDRESET opcode
1052 * @arg - user space buffer containing ioctl content
1053 */
1054static long
1055_ctl_do_reset(void __user *arg)
1056{
1057 struct mpt2_ioctl_diag_reset karg;
1058 struct MPT2SAS_ADAPTER *ioc;
1059 int retval;
1060
1061 if (copy_from_user(&karg, arg, sizeof(karg))) {
1062 printk(KERN_ERR "failure at %s:%d/%s()!\n",
1063 __FILE__, __LINE__, __func__);
1064 return -EFAULT;
1065 }
1066 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1067 return -ENODEV;
1068
1069 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
1070 __func__));
1071
1072 retval = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
1073 FORCE_BIG_HAMMER);
1074 printk(MPT2SAS_INFO_FMT "host reset: %s\n",
1075 ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
1076 return 0;
1077}
1078
1079/**
1080 * _ctl_btdh_search_sas_device - searching for sas device
1081 * @ioc: per adapter object
1082 * @btdh: btdh ioctl payload
1083 */
1084static int
1085_ctl_btdh_search_sas_device(struct MPT2SAS_ADAPTER *ioc,
1086 struct mpt2_ioctl_btdh_mapping *btdh)
1087{
1088 struct _sas_device *sas_device;
1089 unsigned long flags;
1090 int rc = 0;
1091
1092 if (list_empty(&ioc->sas_device_list))
1093 return rc;
1094
1095 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1096 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
1097 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1098 btdh->handle == sas_device->handle) {
1099 btdh->bus = sas_device->channel;
1100 btdh->id = sas_device->id;
1101 rc = 1;
1102 goto out;
1103 } else if (btdh->bus == sas_device->channel && btdh->id ==
1104 sas_device->id && btdh->handle == 0xFFFF) {
1105 btdh->handle = sas_device->handle;
1106 rc = 1;
1107 goto out;
1108 }
1109 }
1110 out:
1111 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1112 return rc;
1113}
1114
1115/**
1116 * _ctl_btdh_search_raid_device - searching for raid device
1117 * @ioc: per adapter object
1118 * @btdh: btdh ioctl payload
1119 */
1120static int
1121_ctl_btdh_search_raid_device(struct MPT2SAS_ADAPTER *ioc,
1122 struct mpt2_ioctl_btdh_mapping *btdh)
1123{
1124 struct _raid_device *raid_device;
1125 unsigned long flags;
1126 int rc = 0;
1127
1128 if (list_empty(&ioc->raid_device_list))
1129 return rc;
1130
1131 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1132 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1133 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1134 btdh->handle == raid_device->handle) {
1135 btdh->bus = raid_device->channel;
1136 btdh->id = raid_device->id;
1137 rc = 1;
1138 goto out;
1139 } else if (btdh->bus == raid_device->channel && btdh->id ==
1140 raid_device->id && btdh->handle == 0xFFFF) {
1141 btdh->handle = raid_device->handle;
1142 rc = 1;
1143 goto out;
1144 }
1145 }
1146 out:
1147 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1148 return rc;
1149}
1150
1151/**
1152 * _ctl_btdh_mapping - main handler for MPT2BTDHMAPPING opcode
1153 * @arg - user space buffer containing ioctl content
1154 */
1155static long
1156_ctl_btdh_mapping(void __user *arg)
1157{
1158 struct mpt2_ioctl_btdh_mapping karg;
1159 struct MPT2SAS_ADAPTER *ioc;
1160 int rc;
1161
1162 if (copy_from_user(&karg, arg, sizeof(karg))) {
1163 printk(KERN_ERR "failure at %s:%d/%s()!\n",
1164 __FILE__, __LINE__, __func__);
1165 return -EFAULT;
1166 }
1167 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1168 return -ENODEV;
1169
1170 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
1171 __func__));
1172
1173 rc = _ctl_btdh_search_sas_device(ioc, &karg);
1174 if (!rc)
1175 _ctl_btdh_search_raid_device(ioc, &karg);
1176
1177 if (copy_to_user(arg, &karg, sizeof(karg))) {
1178 printk(KERN_ERR "failure at %s:%d/%s()!\n",
1179 __FILE__, __LINE__, __func__);
1180 return -EFAULT;
1181 }
1182 return 0;
1183}
1184
1185/**
1186 * _ctl_diag_capability - return diag buffer capability
1187 * @ioc: per adapter object
1188 * @buffer_type: specifies either TRACE or SNAPSHOT
1189 *
1190 * returns 1 when diag buffer support is enabled in firmware
1191 */
1192static u8
1193_ctl_diag_capability(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type)
1194{
1195 u8 rc = 0;
1196
1197 switch (buffer_type) {
1198 case MPI2_DIAG_BUF_TYPE_TRACE:
1199 if (ioc->facts.IOCCapabilities &
1200 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
1201 rc = 1;
1202 break;
1203 case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
1204 if (ioc->facts.IOCCapabilities &
1205 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
1206 rc = 1;
1207 break;
1208 }
1209
1210 return rc;
1211}
1212
1213/**
1214 * _ctl_diag_register - application register with driver
1215 * @arg - user space buffer containing ioctl content
1216 * @state - NON_BLOCKING or BLOCKING
1217 *
1218 * This will allow the driver to setup any required buffers that will be
1219 * needed by firmware to communicate with the driver.
1220 */
1221static long
1222_ctl_diag_register(void __user *arg, enum block_state state)
1223{
1224 struct mpt2_diag_register karg;
1225 struct MPT2SAS_ADAPTER *ioc;
1226 int rc, i;
1227 void *request_data = NULL;
1228 dma_addr_t request_data_dma;
1229 u32 request_data_sz = 0;
1230 Mpi2DiagBufferPostRequest_t *mpi_request;
1231 Mpi2DiagBufferPostReply_t *mpi_reply;
1232 u8 buffer_type;
1233 unsigned long timeleft;
1234 u16 smid;
1235 u16 ioc_status;
1236 u8 issue_reset = 0;
1237
1238 if (copy_from_user(&karg, arg, sizeof(karg))) {
1239 printk(KERN_ERR "failure at %s:%d/%s()!\n",
1240 __FILE__, __LINE__, __func__);
1241 return -EFAULT;
1242 }
1243 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1244 return -ENODEV;
1245
1246 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
1247 __func__));
1248
1249 buffer_type = karg.buffer_type;
1250 if (!_ctl_diag_capability(ioc, buffer_type)) {
1251 printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
1252 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1253 return -EPERM;
1254 }
1255
1256 if (ioc->diag_buffer_status[buffer_type] &
1257 MPT2_DIAG_BUFFER_IS_REGISTERED) {
1258 printk(MPT2SAS_ERR_FMT "%s: already has a registered "
1259 "buffer for buffer_type(0x%02x)\n", ioc->name, __func__,
1260 buffer_type);
1261 return -EINVAL;
1262 }
1263
1264 if (karg.requested_buffer_size % 4) {
1265 printk(MPT2SAS_ERR_FMT "%s: the requested_buffer_size "
1266 "is not 4 byte aligned\n", ioc->name, __func__);
1267 return -EINVAL;
1268 }
1269
1270 if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
1271 return -EAGAIN;
1272 else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
1273 return -ERESTARTSYS;
1274
1275 if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
1276 printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
1277 ioc->name, __func__);
1278 rc = -EAGAIN;
1279 goto out;
1280 }
1281
1282 smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1283 if (!smid) {
1284 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
1285 ioc->name, __func__);
1286 rc = -EAGAIN;
1287 goto out;
1288 }
1289
1290 rc = 0;
1291 ioc->ctl_cmds.status = MPT2_CMD_PENDING;
1292 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1293 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
1294 ioc->ctl_cmds.smid = smid;
1295
1296 request_data = ioc->diag_buffer[buffer_type];
1297 request_data_sz = karg.requested_buffer_size;
1298 ioc->unique_id[buffer_type] = karg.unique_id;
1299 ioc->diag_buffer_status[buffer_type] = 0;
1300 memcpy(ioc->product_specific[buffer_type], karg.product_specific,
1301 MPT2_PRODUCT_SPECIFIC_DWORDS);
1302 ioc->diagnostic_flags[buffer_type] = karg.diagnostic_flags;
1303
1304 if (request_data) {
1305 request_data_dma = ioc->diag_buffer_dma[buffer_type];
1306 if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
1307 pci_free_consistent(ioc->pdev,
1308 ioc->diag_buffer_sz[buffer_type],
1309 request_data, request_data_dma);
1310 request_data = NULL;
1311 }
1312 }
1313
1314 if (request_data == NULL) {
1315 ioc->diag_buffer_sz[buffer_type] = 0;
1316 ioc->diag_buffer_dma[buffer_type] = 0;
1317 request_data = pci_alloc_consistent(
1318 ioc->pdev, request_data_sz, &request_data_dma);
1319 if (request_data == NULL) {
1320 printk(MPT2SAS_ERR_FMT "%s: failed allocating memory"
1321 " for diag buffers, requested size(%d)\n",
1322 ioc->name, __func__, request_data_sz);
1323 mpt2sas_base_free_smid(ioc, smid);
1324 return -ENOMEM;
1325 }
1326 ioc->diag_buffer[buffer_type] = request_data;
1327 ioc->diag_buffer_sz[buffer_type] = request_data_sz;
1328 ioc->diag_buffer_dma[buffer_type] = request_data_dma;
1329 }
1330
1331 mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
1332 mpi_request->BufferType = karg.buffer_type;
1333 mpi_request->Flags = cpu_to_le32(karg.diagnostic_flags);
1334 mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
1335 mpi_request->BufferLength = cpu_to_le32(request_data_sz);
1336
1337 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: diag_buffer(0x%p), "
1338 "dma(0x%llx), sz(%d)\n", ioc->name, __func__, request_data,
1339 (unsigned long long)request_data_dma, mpi_request->BufferLength));
1340
1341 for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++)
1342 mpi_request->ProductSpecific[i] =
1343 cpu_to_le32(ioc->product_specific[buffer_type][i]);
1344
1345 mpt2sas_base_put_smid_default(ioc, smid, mpi_request->VF_ID);
1346 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
1347 MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
1348
1349 if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
1350 printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
1351 __func__);
1352 _debug_dump_mf(mpi_request,
1353 sizeof(Mpi2DiagBufferPostRequest_t)/4);
1354 if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
1355 issue_reset = 1;
1356 goto issue_host_reset;
1357 }
1358
1359 /* process the completed Reply Message Frame */
1360 if ((ioc->ctl_cmds.status & MPT2_CMD_REPLY_VALID) == 0) {
1361 printk(MPT2SAS_ERR_FMT "%s: no reply message\n",
1362 ioc->name, __func__);
1363 rc = -EFAULT;
1364 goto out;
1365 }
1366
1367 mpi_reply = ioc->ctl_cmds.reply;
1368 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
1369
1370 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1371 ioc->diag_buffer_status[buffer_type] |=
1372 MPT2_DIAG_BUFFER_IS_REGISTERED;
1373 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: success\n",
1374 ioc->name, __func__));
1375 } else {
1376 printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) "
1377 "log_info(0x%08x)\n", ioc->name, __func__,
1378 ioc_status, mpi_reply->IOCLogInfo);
1379 rc = -EFAULT;
1380 }
1381
1382 issue_host_reset:
1383 if (issue_reset)
1384 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
1385 FORCE_BIG_HAMMER);
1386
1387 out:
1388
1389 if (rc && request_data)
1390 pci_free_consistent(ioc->pdev, request_data_sz,
1391 request_data, request_data_dma);
1392
1393 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
1394 mutex_unlock(&ioc->ctl_cmds.mutex);
1395 return rc;
1396}
1397
1398/**
1399 * _ctl_diag_unregister - application unregister with driver
1400 * @arg - user space buffer containing ioctl content
1401 *
1402 * This will allow the driver to cleanup any memory allocated for diag
1403 * messages and to free up any resources.
1404 */
1405static long
1406_ctl_diag_unregister(void __user *arg)
1407{
1408 struct mpt2_diag_unregister karg;
1409 struct MPT2SAS_ADAPTER *ioc;
1410 void *request_data;
1411 dma_addr_t request_data_dma;
1412 u32 request_data_sz;
1413 u8 buffer_type;
1414
1415 if (copy_from_user(&karg, arg, sizeof(karg))) {
1416 printk(KERN_ERR "failure at %s:%d/%s()!\n",
1417 __FILE__, __LINE__, __func__);
1418 return -EFAULT;
1419 }
1420 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1421 return -ENODEV;
1422
1423 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
1424 __func__));
1425
1426 buffer_type = karg.unique_id & 0x000000ff;
1427 if (!_ctl_diag_capability(ioc, buffer_type)) {
1428 printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
1429 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1430 return -EPERM;
1431 }
1432
1433 if ((ioc->diag_buffer_status[buffer_type] &
1434 MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) {
1435 printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) is not "
1436 "registered\n", ioc->name, __func__, buffer_type);
1437 return -EINVAL;
1438 }
1439 if ((ioc->diag_buffer_status[buffer_type] &
1440 MPT2_DIAG_BUFFER_IS_RELEASED) == 0) {
1441 printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) has not been "
1442 "released\n", ioc->name, __func__, buffer_type);
1443 return -EINVAL;
1444 }
1445
1446 if (karg.unique_id != ioc->unique_id[buffer_type]) {
1447 printk(MPT2SAS_ERR_FMT "%s: unique_id(0x%08x) is not "
1448 "registered\n", ioc->name, __func__, karg.unique_id);
1449 return -EINVAL;
1450 }
1451
1452 request_data = ioc->diag_buffer[buffer_type];
1453 if (!request_data) {
1454 printk(MPT2SAS_ERR_FMT "%s: doesn't have memory allocated for "
1455 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1456 return -ENOMEM;
1457 }
1458
1459 request_data_sz = ioc->diag_buffer_sz[buffer_type];
1460 request_data_dma = ioc->diag_buffer_dma[buffer_type];
1461 pci_free_consistent(ioc->pdev, request_data_sz,
1462 request_data, request_data_dma);
1463 ioc->diag_buffer[buffer_type] = NULL;
1464 ioc->diag_buffer_status[buffer_type] = 0;
1465 return 0;
1466}
1467
1468/**
1469 * _ctl_diag_query - query relevant info associated with diag buffers
1470 * @arg - user space buffer containing ioctl content
1471 *
1472 * The application will send only buffer_type and unique_id. Driver will
1473 * inspect unique_id first, if valid, fill in all the info. If unique_id is
1474 * 0x00, the driver will return info specified by Buffer Type.
1475 */
1476static long
1477_ctl_diag_query(void __user *arg)
1478{
1479 struct mpt2_diag_query karg;
1480 struct MPT2SAS_ADAPTER *ioc;
1481 void *request_data;
1482 int i;
1483 u8 buffer_type;
1484
1485 if (copy_from_user(&karg, arg, sizeof(karg))) {
1486 printk(KERN_ERR "failure at %s:%d/%s()!\n",
1487 __FILE__, __LINE__, __func__);
1488 return -EFAULT;
1489 }
1490 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1491 return -ENODEV;
1492
1493 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
1494 __func__));
1495
1496 karg.application_flags = 0;
1497 buffer_type = karg.buffer_type;
1498
1499 if (!_ctl_diag_capability(ioc, buffer_type)) {
1500 printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
1501 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1502 return -EPERM;
1503 }
1504
1505 if ((ioc->diag_buffer_status[buffer_type] &
1506 MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) {
1507 printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) is not "
1508 "registered\n", ioc->name, __func__, buffer_type);
1509 return -EINVAL;
1510 }
1511
1512 if (karg.unique_id & 0xffffff00) {
1513 if (karg.unique_id != ioc->unique_id[buffer_type]) {
1514 printk(MPT2SAS_ERR_FMT "%s: unique_id(0x%08x) is not "
1515 "registered\n", ioc->name, __func__,
1516 karg.unique_id);
1517 return -EINVAL;
1518 }
1519 }
1520
1521 request_data = ioc->diag_buffer[buffer_type];
1522 if (!request_data) {
1523 printk(MPT2SAS_ERR_FMT "%s: doesn't have buffer for "
1524 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1525 return -ENOMEM;
1526 }
1527
1528 if (ioc->diag_buffer_status[buffer_type] & MPT2_DIAG_BUFFER_IS_RELEASED)
1529 karg.application_flags = (MPT2_APP_FLAGS_APP_OWNED |
1530 MPT2_APP_FLAGS_BUFFER_VALID);
1531 else
1532 karg.application_flags = (MPT2_APP_FLAGS_APP_OWNED |
1533 MPT2_APP_FLAGS_BUFFER_VALID |
1534 MPT2_APP_FLAGS_FW_BUFFER_ACCESS);
1535
1536 for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++)
1537 karg.product_specific[i] =
1538 ioc->product_specific[buffer_type][i];
1539
1540 karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
1541 karg.driver_added_buffer_size = 0;
1542 karg.unique_id = ioc->unique_id[buffer_type];
1543 karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
1544
1545 if (copy_to_user(arg, &karg, sizeof(struct mpt2_diag_query))) {
1546 printk(MPT2SAS_ERR_FMT "%s: unable to write mpt2_diag_query "
1547 "data @ %p\n", ioc->name, __func__, arg);
1548 return -EFAULT;
1549 }
1550 return 0;
1551}
1552
1553/**
1554 * _ctl_diag_release - request to send Diag Release Message to firmware
1555 * @arg - user space buffer containing ioctl content
1556 * @state - NON_BLOCKING or BLOCKING
1557 *
1558 * This allows ownership of the specified buffer to returned to the driver,
1559 * allowing an application to read the buffer without fear that firmware is
1560 * overwritting information in the buffer.
1561 */
1562static long
1563_ctl_diag_release(void __user *arg, enum block_state state)
1564{
1565 struct mpt2_diag_release karg;
1566 struct MPT2SAS_ADAPTER *ioc;
1567 void *request_data;
1568 int rc;
1569 Mpi2DiagReleaseRequest_t *mpi_request;
1570 Mpi2DiagReleaseReply_t *mpi_reply;
1571 u8 buffer_type;
1572 unsigned long timeleft;
1573 u16 smid;
1574 u16 ioc_status;
1575 u8 issue_reset = 0;
1576
1577 if (copy_from_user(&karg, arg, sizeof(karg))) {
1578 printk(KERN_ERR "failure at %s:%d/%s()!\n",
1579 __FILE__, __LINE__, __func__);
1580 return -EFAULT;
1581 }
1582 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1583 return -ENODEV;
1584
1585 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
1586 __func__));
1587
1588 buffer_type = karg.unique_id & 0x000000ff;
1589 if (!_ctl_diag_capability(ioc, buffer_type)) {
1590 printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
1591 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1592 return -EPERM;
1593 }
1594
1595 if ((ioc->diag_buffer_status[buffer_type] &
1596 MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) {
1597 printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) is not "
1598 "registered\n", ioc->name, __func__, buffer_type);
1599 return -EINVAL;
1600 }
1601
1602 if (karg.unique_id != ioc->unique_id[buffer_type]) {
1603 printk(MPT2SAS_ERR_FMT "%s: unique_id(0x%08x) is not "
1604 "registered\n", ioc->name, __func__, karg.unique_id);
1605 return -EINVAL;
1606 }
1607
1608 if (ioc->diag_buffer_status[buffer_type] &
1609 MPT2_DIAG_BUFFER_IS_RELEASED) {
1610 printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) "
1611 "is already released\n", ioc->name, __func__,
1612 buffer_type);
1613 return 0;
1614 }
1615
1616 request_data = ioc->diag_buffer[buffer_type];
1617
1618 if (!request_data) {
1619 printk(MPT2SAS_ERR_FMT "%s: doesn't have memory allocated for "
1620 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1621 return -ENOMEM;
1622 }
1623
1624 if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
1625 return -EAGAIN;
1626 else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
1627 return -ERESTARTSYS;
1628
1629 if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
1630 printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
1631 ioc->name, __func__);
1632 rc = -EAGAIN;
1633 goto out;
1634 }
1635
1636 smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1637 if (!smid) {
1638 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
1639 ioc->name, __func__);
1640 rc = -EAGAIN;
1641 goto out;
1642 }
1643
1644 rc = 0;
1645 ioc->ctl_cmds.status = MPT2_CMD_PENDING;
1646 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1647 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
1648 ioc->ctl_cmds.smid = smid;
1649
1650 mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
1651 mpi_request->BufferType = buffer_type;
1652
1653 mpt2sas_base_put_smid_default(ioc, smid, mpi_request->VF_ID);
1654 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
1655 MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
1656
1657 if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
1658 printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
1659 __func__);
1660 _debug_dump_mf(mpi_request,
1661 sizeof(Mpi2DiagReleaseRequest_t)/4);
1662 if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
1663 issue_reset = 1;
1664 goto issue_host_reset;
1665 }
1666
1667 /* process the completed Reply Message Frame */
1668 if ((ioc->ctl_cmds.status & MPT2_CMD_REPLY_VALID) == 0) {
1669 printk(MPT2SAS_ERR_FMT "%s: no reply message\n",
1670 ioc->name, __func__);
1671 rc = -EFAULT;
1672 goto out;
1673 }
1674
1675 mpi_reply = ioc->ctl_cmds.reply;
1676 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
1677
1678 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1679 ioc->diag_buffer_status[buffer_type] |=
1680 MPT2_DIAG_BUFFER_IS_RELEASED;
1681 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: success\n",
1682 ioc->name, __func__));
1683 } else {
1684 printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) "
1685 "log_info(0x%08x)\n", ioc->name, __func__,
1686 ioc_status, mpi_reply->IOCLogInfo);
1687 rc = -EFAULT;
1688 }
1689
1690 issue_host_reset:
1691 if (issue_reset)
1692 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
1693 FORCE_BIG_HAMMER);
1694
1695 out:
1696
1697 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
1698 mutex_unlock(&ioc->ctl_cmds.mutex);
1699 return rc;
1700}
1701
1702/**
1703 * _ctl_diag_read_buffer - request for copy of the diag buffer
1704 * @arg - user space buffer containing ioctl content
1705 * @state - NON_BLOCKING or BLOCKING
1706 */
1707static long
1708_ctl_diag_read_buffer(void __user *arg, enum block_state state)
1709{
1710 struct mpt2_diag_read_buffer karg;
1711 struct mpt2_diag_read_buffer __user *uarg = arg;
1712 struct MPT2SAS_ADAPTER *ioc;
1713 void *request_data, *diag_data;
1714 Mpi2DiagBufferPostRequest_t *mpi_request;
1715 Mpi2DiagBufferPostReply_t *mpi_reply;
1716 int rc, i;
1717 u8 buffer_type;
1718 unsigned long timeleft;
1719 u16 smid;
1720 u16 ioc_status;
1721 u8 issue_reset = 0;
1722
1723 if (copy_from_user(&karg, arg, sizeof(karg))) {
1724 printk(KERN_ERR "failure at %s:%d/%s()!\n",
1725 __FILE__, __LINE__, __func__);
1726 return -EFAULT;
1727 }
1728 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1729 return -ENODEV;
1730
1731 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
1732 __func__));
1733
1734 buffer_type = karg.unique_id & 0x000000ff;
1735 if (!_ctl_diag_capability(ioc, buffer_type)) {
1736 printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
1737 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1738 return -EPERM;
1739 }
1740
1741 if (karg.unique_id != ioc->unique_id[buffer_type]) {
1742 printk(MPT2SAS_ERR_FMT "%s: unique_id(0x%08x) is not "
1743 "registered\n", ioc->name, __func__, karg.unique_id);
1744 return -EINVAL;
1745 }
1746
1747 request_data = ioc->diag_buffer[buffer_type];
1748 if (!request_data) {
1749 printk(MPT2SAS_ERR_FMT "%s: doesn't have buffer for "
1750 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1751 return -ENOMEM;
1752 }
1753
1754 if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
1755 printk(MPT2SAS_ERR_FMT "%s: either the starting_offset "
1756 "or bytes_to_read are not 4 byte aligned\n", ioc->name,
1757 __func__);
1758 return -EINVAL;
1759 }
1760
1761 diag_data = (void *)(request_data + karg.starting_offset);
1762 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: diag_buffer(%p), "
1763 "offset(%d), sz(%d)\n", ioc->name, __func__,
1764 diag_data, karg.starting_offset, karg.bytes_to_read));
1765
1766 if (copy_to_user((void __user *)uarg->diagnostic_data,
1767 diag_data, karg.bytes_to_read)) {
1768 printk(MPT2SAS_ERR_FMT "%s: Unable to write "
1769 "mpt_diag_read_buffer_t data @ %p\n", ioc->name,
1770 __func__, diag_data);
1771 return -EFAULT;
1772 }
1773
1774 if ((karg.flags & MPT2_FLAGS_REREGISTER) == 0)
1775 return 0;
1776
1777 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: Reregister "
1778 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type));
1779 if ((ioc->diag_buffer_status[buffer_type] &
1780 MPT2_DIAG_BUFFER_IS_RELEASED) == 0) {
1781 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
1782 "buffer_type(0x%02x) is still registered\n", ioc->name,
1783 __func__, buffer_type));
1784 return 0;
1785 }
1786 /* Get a free request frame and save the message context.
1787 */
1788 if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
1789 return -EAGAIN;
1790 else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
1791 return -ERESTARTSYS;
1792
1793 if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
1794 printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
1795 ioc->name, __func__);
1796 rc = -EAGAIN;
1797 goto out;
1798 }
1799
1800 smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1801 if (!smid) {
1802 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
1803 ioc->name, __func__);
1804 rc = -EAGAIN;
1805 goto out;
1806 }
1807
1808 rc = 0;
1809 ioc->ctl_cmds.status = MPT2_CMD_PENDING;
1810 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1811 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
1812 ioc->ctl_cmds.smid = smid;
1813
1814 mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
1815 mpi_request->BufferType = buffer_type;
1816 mpi_request->BufferLength =
1817 cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
1818 mpi_request->BufferAddress =
1819 cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
1820 for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++)
1821 mpi_request->ProductSpecific[i] =
1822 cpu_to_le32(ioc->product_specific[buffer_type][i]);
1823
1824 mpt2sas_base_put_smid_default(ioc, smid, mpi_request->VF_ID);
1825 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
1826 MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
1827
1828 if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
1829 printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
1830 __func__);
1831 _debug_dump_mf(mpi_request,
1832 sizeof(Mpi2DiagBufferPostRequest_t)/4);
1833 if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
1834 issue_reset = 1;
1835 goto issue_host_reset;
1836 }
1837
1838 /* process the completed Reply Message Frame */
1839 if ((ioc->ctl_cmds.status & MPT2_CMD_REPLY_VALID) == 0) {
1840 printk(MPT2SAS_ERR_FMT "%s: no reply message\n",
1841 ioc->name, __func__);
1842 rc = -EFAULT;
1843 goto out;
1844 }
1845
1846 mpi_reply = ioc->ctl_cmds.reply;
1847 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
1848
1849 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1850 ioc->diag_buffer_status[buffer_type] |=
1851 MPT2_DIAG_BUFFER_IS_REGISTERED;
1852 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: success\n",
1853 ioc->name, __func__));
1854 } else {
1855 printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) "
1856 "log_info(0x%08x)\n", ioc->name, __func__,
1857 ioc_status, mpi_reply->IOCLogInfo);
1858 rc = -EFAULT;
1859 }
1860
1861 issue_host_reset:
1862 if (issue_reset)
1863 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
1864 FORCE_BIG_HAMMER);
1865
1866 out:
1867
1868 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
1869 mutex_unlock(&ioc->ctl_cmds.mutex);
1870 return rc;
1871}
1872
1873/**
1874 * _ctl_ioctl_main - main ioctl entry point
1875 * @file - (struct file)
1876 * @cmd - ioctl opcode
1877 * @arg -
1878 */
1879static long
1880_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg)
1881{
1882 enum block_state state;
1883 long ret = -EINVAL;
1884 unsigned long flags;
1885
1886 state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING :
1887 BLOCKING;
1888
1889 switch (cmd) {
1890 case MPT2IOCINFO:
1891 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_iocinfo))
1892 ret = _ctl_getiocinfo(arg);
1893 break;
1894 case MPT2COMMAND:
1895 {
1896 struct mpt2_ioctl_command karg;
1897 struct mpt2_ioctl_command __user *uarg;
1898 struct MPT2SAS_ADAPTER *ioc;
1899
1900 if (copy_from_user(&karg, arg, sizeof(karg))) {
1901 printk(KERN_ERR "failure at %s:%d/%s()!\n",
1902 __FILE__, __LINE__, __func__);
1903 return -EFAULT;
1904 }
1905
1906 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 ||
1907 !ioc)
1908 return -ENODEV;
1909
1910 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1911 if (ioc->shost_recovery) {
1912 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
1913 flags);
1914 return -EAGAIN;
1915 }
1916 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1917
1918 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) {
1919 uarg = arg;
1920 ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf, state);
1921 }
1922 break;
1923 }
1924 case MPT2EVENTQUERY:
1925 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventquery))
1926 ret = _ctl_eventquery(arg);
1927 break;
1928 case MPT2EVENTENABLE:
1929 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventenable))
1930 ret = _ctl_eventenable(arg);
1931 break;
1932 case MPT2EVENTREPORT:
1933 ret = _ctl_eventreport(arg);
1934 break;
1935 case MPT2HARDRESET:
1936 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_diag_reset))
1937 ret = _ctl_do_reset(arg);
1938 break;
1939 case MPT2BTDHMAPPING:
1940 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_btdh_mapping))
1941 ret = _ctl_btdh_mapping(arg);
1942 break;
1943 case MPT2DIAGREGISTER:
1944 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_register))
1945 ret = _ctl_diag_register(arg, state);
1946 break;
1947 case MPT2DIAGUNREGISTER:
1948 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_unregister))
1949 ret = _ctl_diag_unregister(arg);
1950 break;
1951 case MPT2DIAGQUERY:
1952 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_query))
1953 ret = _ctl_diag_query(arg);
1954 break;
1955 case MPT2DIAGRELEASE:
1956 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_release))
1957 ret = _ctl_diag_release(arg, state);
1958 break;
1959 case MPT2DIAGREADBUFFER:
1960 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_read_buffer))
1961 ret = _ctl_diag_read_buffer(arg, state);
1962 break;
1963 default:
1964 {
1965 struct mpt2_ioctl_command karg;
1966 struct MPT2SAS_ADAPTER *ioc;
1967
1968 if (copy_from_user(&karg, arg, sizeof(karg))) {
1969 printk(KERN_ERR "failure at %s:%d/%s()!\n",
1970 __FILE__, __LINE__, __func__);
1971 return -EFAULT;
1972 }
1973
1974 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 ||
1975 !ioc)
1976 return -ENODEV;
1977
1978 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT
1979 "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
1980 break;
1981 }
1982 }
1983 return ret;
1984}
1985
1986/**
1987 * _ctl_ioctl - main ioctl entry point (unlocked)
1988 * @file - (struct file)
1989 * @cmd - ioctl opcode
1990 * @arg -
1991 */
1992static long
1993_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1994{
1995 long ret;
1996 lock_kernel();
1997 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg);
1998 unlock_kernel();
1999 return ret;
2000}
2001
2002#ifdef CONFIG_COMPAT
2003/**
2004 * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
2005 * @file - (struct file)
2006 * @cmd - ioctl opcode
2007 * @arg - (struct mpt2_ioctl_command32)
2008 *
2009 * MPT2COMMAND32 - Handle 32bit applications running on 64bit os.
2010 */
2011static long
2012_ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
2013{
2014 struct mpt2_ioctl_command32 karg32;
2015 struct mpt2_ioctl_command32 __user *uarg;
2016 struct mpt2_ioctl_command karg;
2017 struct MPT2SAS_ADAPTER *ioc;
2018 enum block_state state;
2019 unsigned long flags;
2020
2021 if (_IOC_SIZE(cmd) != sizeof(struct mpt2_ioctl_command32))
2022 return -EINVAL;
2023
2024 uarg = (struct mpt2_ioctl_command32 __user *) arg;
2025
2026 if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
2027 printk(KERN_ERR "failure at %s:%d/%s()!\n",
2028 __FILE__, __LINE__, __func__);
2029 return -EFAULT;
2030 }
2031 if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc)
2032 return -ENODEV;
2033
2034 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
2035 if (ioc->shost_recovery) {
2036 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
2037 flags);
2038 return -EAGAIN;
2039 }
2040 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
2041
2042 memset(&karg, 0, sizeof(struct mpt2_ioctl_command));
2043 karg.hdr.ioc_number = karg32.hdr.ioc_number;
2044 karg.hdr.port_number = karg32.hdr.port_number;
2045 karg.hdr.max_data_size = karg32.hdr.max_data_size;
2046 karg.timeout = karg32.timeout;
2047 karg.max_reply_bytes = karg32.max_reply_bytes;
2048 karg.data_in_size = karg32.data_in_size;
2049 karg.data_out_size = karg32.data_out_size;
2050 karg.max_sense_bytes = karg32.max_sense_bytes;
2051 karg.data_sge_offset = karg32.data_sge_offset;
2052 memcpy(&karg.reply_frame_buf_ptr, &karg32.reply_frame_buf_ptr,
2053 sizeof(uint32_t));
2054 memcpy(&karg.data_in_buf_ptr, &karg32.data_in_buf_ptr,
2055 sizeof(uint32_t));
2056 memcpy(&karg.data_out_buf_ptr, &karg32.data_out_buf_ptr,
2057 sizeof(uint32_t));
2058 memcpy(&karg.sense_data_ptr, &karg32.sense_data_ptr,
2059 sizeof(uint32_t));
2060 state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
2061 return _ctl_do_mpt_command(ioc, karg, &uarg->mf, state);
2062}
2063
2064/**
2065 * _ctl_ioctl_compat - main ioctl entry point (compat)
2066 * @file -
2067 * @cmd -
2068 * @arg -
2069 *
2070 * This routine handles 32 bit applications in 64bit os.
2071 */
2072static long
2073_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2074{
2075 long ret;
2076 lock_kernel();
2077 if (cmd == MPT2COMMAND32)
2078 ret = _ctl_compat_mpt_command(file, cmd, arg);
2079 else
2080 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg);
2081 unlock_kernel();
2082 return ret;
2083}
2084#endif
2085
2086/* scsi host attributes */
2087
2088/**
2089 * _ctl_version_fw_show - firmware version
2090 * @cdev - pointer to embedded class device
2091 * @buf - the buffer returned
2092 *
2093 * A sysfs 'read-only' shost attribute.
2094 */
2095static ssize_t
2096_ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
2097 char *buf)
2098{
2099 struct Scsi_Host *shost = class_to_shost(cdev);
2100 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2101
2102 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
2103 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2104 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2105 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2106 ioc->facts.FWVersion.Word & 0x000000FF);
2107}
2108static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
2109
2110/**
2111 * _ctl_version_bios_show - bios version
2112 * @cdev - pointer to embedded class device
2113 * @buf - the buffer returned
2114 *
2115 * A sysfs 'read-only' shost attribute.
2116 */
2117static ssize_t
2118_ctl_version_bios_show(struct device *cdev, struct device_attribute *attr,
2119 char *buf)
2120{
2121 struct Scsi_Host *shost = class_to_shost(cdev);
2122 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2123
2124 u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2125
2126 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
2127 (version & 0xFF000000) >> 24,
2128 (version & 0x00FF0000) >> 16,
2129 (version & 0x0000FF00) >> 8,
2130 version & 0x000000FF);
2131}
2132static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
2133
2134/**
2135 * _ctl_version_mpi_show - MPI (message passing interface) version
2136 * @cdev - pointer to embedded class device
2137 * @buf - the buffer returned
2138 *
2139 * A sysfs 'read-only' shost attribute.
2140 */
2141static ssize_t
2142_ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr,
2143 char *buf)
2144{
2145 struct Scsi_Host *shost = class_to_shost(cdev);
2146 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2147
2148 return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
2149 ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
2150}
2151static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
2152
2153/**
2154 * _ctl_version_product_show - product name
2155 * @cdev - pointer to embedded class device
2156 * @buf - the buffer returned
2157 *
2158 * A sysfs 'read-only' shost attribute.
2159 */
2160static ssize_t
2161_ctl_version_product_show(struct device *cdev, struct device_attribute *attr,
2162 char *buf)
2163{
2164 struct Scsi_Host *shost = class_to_shost(cdev);
2165 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2166
2167 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
2168}
2169static DEVICE_ATTR(version_product, S_IRUGO,
2170 _ctl_version_product_show, NULL);
2171
2172/**
2173 * _ctl_version_nvdata_persistent_show - ndvata persistent version
2174 * @cdev - pointer to embedded class device
2175 * @buf - the buffer returned
2176 *
2177 * A sysfs 'read-only' shost attribute.
2178 */
2179static ssize_t
2180_ctl_version_nvdata_persistent_show(struct device *cdev,
2181 struct device_attribute *attr, char *buf)
2182{
2183 struct Scsi_Host *shost = class_to_shost(cdev);
2184 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2185
2186 return snprintf(buf, PAGE_SIZE, "%02xh\n",
2187 le16_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
2188}
2189static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
2190 _ctl_version_nvdata_persistent_show, NULL);
2191
2192/**
2193 * _ctl_version_nvdata_default_show - nvdata default version
2194 * @cdev - pointer to embedded class device
2195 * @buf - the buffer returned
2196 *
2197 * A sysfs 'read-only' shost attribute.
2198 */
2199static ssize_t
2200_ctl_version_nvdata_default_show(struct device *cdev,
2201 struct device_attribute *attr, char *buf)
2202{
2203 struct Scsi_Host *shost = class_to_shost(cdev);
2204 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2205
2206 return snprintf(buf, PAGE_SIZE, "%02xh\n",
2207 le16_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
2208}
2209static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
2210 _ctl_version_nvdata_default_show, NULL);
2211
2212/**
2213 * _ctl_board_name_show - board name
2214 * @cdev - pointer to embedded class device
2215 * @buf - the buffer returned
2216 *
2217 * A sysfs 'read-only' shost attribute.
2218 */
2219static ssize_t
2220_ctl_board_name_show(struct device *cdev, struct device_attribute *attr,
2221 char *buf)
2222{
2223 struct Scsi_Host *shost = class_to_shost(cdev);
2224 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2225
2226 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
2227}
2228static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
2229
2230/**
2231 * _ctl_board_assembly_show - board assembly name
2232 * @cdev - pointer to embedded class device
2233 * @buf - the buffer returned
2234 *
2235 * A sysfs 'read-only' shost attribute.
2236 */
2237static ssize_t
2238_ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
2239 char *buf)
2240{
2241 struct Scsi_Host *shost = class_to_shost(cdev);
2242 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2243
2244 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
2245}
2246static DEVICE_ATTR(board_assembly, S_IRUGO,
2247 _ctl_board_assembly_show, NULL);
2248
2249/**
2250 * _ctl_board_tracer_show - board tracer number
2251 * @cdev - pointer to embedded class device
2252 * @buf - the buffer returned
2253 *
2254 * A sysfs 'read-only' shost attribute.
2255 */
2256static ssize_t
2257_ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
2258 char *buf)
2259{
2260 struct Scsi_Host *shost = class_to_shost(cdev);
2261 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2262
2263 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
2264}
2265static DEVICE_ATTR(board_tracer, S_IRUGO,
2266 _ctl_board_tracer_show, NULL);
2267
2268/**
2269 * _ctl_io_delay_show - io missing delay
2270 * @cdev - pointer to embedded class device
2271 * @buf - the buffer returned
2272 *
2273 * This is for firmware implemention for deboucing device
2274 * removal events.
2275 *
2276 * A sysfs 'read-only' shost attribute.
2277 */
2278static ssize_t
2279_ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
2280 char *buf)
2281{
2282 struct Scsi_Host *shost = class_to_shost(cdev);
2283 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2284
2285 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
2286}
2287static DEVICE_ATTR(io_delay, S_IRUGO,
2288 _ctl_io_delay_show, NULL);
2289
2290/**
2291 * _ctl_device_delay_show - device missing delay
2292 * @cdev - pointer to embedded class device
2293 * @buf - the buffer returned
2294 *
2295 * This is for firmware implemention for deboucing device
2296 * removal events.
2297 *
2298 * A sysfs 'read-only' shost attribute.
2299 */
2300static ssize_t
2301_ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
2302 char *buf)
2303{
2304 struct Scsi_Host *shost = class_to_shost(cdev);
2305 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2306
2307 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
2308}
2309static DEVICE_ATTR(device_delay, S_IRUGO,
2310 _ctl_device_delay_show, NULL);
2311
2312/**
2313 * _ctl_fw_queue_depth_show - global credits
2314 * @cdev - pointer to embedded class device
2315 * @buf - the buffer returned
2316 *
2317 * This is firmware queue depth limit
2318 *
2319 * A sysfs 'read-only' shost attribute.
2320 */
2321static ssize_t
2322_ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
2323 char *buf)
2324{
2325 struct Scsi_Host *shost = class_to_shost(cdev);
2326 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2327
2328 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
2329}
2330static DEVICE_ATTR(fw_queue_depth, S_IRUGO,
2331 _ctl_fw_queue_depth_show, NULL);
2332
2333/**
2334 * _ctl_sas_address_show - sas address
2335 * @cdev - pointer to embedded class device
2336 * @buf - the buffer returned
2337 *
2338 * This is the controller sas address
2339 *
2340 * A sysfs 'read-only' shost attribute.
2341 */
2342static ssize_t
2343_ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
2344 char *buf)
2345{
2346 struct Scsi_Host *shost = class_to_shost(cdev);
2347 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2348
2349 return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
2350 (unsigned long long)ioc->sas_hba.sas_address);
2351}
2352static DEVICE_ATTR(host_sas_address, S_IRUGO,
2353 _ctl_host_sas_address_show, NULL);
2354
2355/**
2356 * _ctl_logging_level_show - logging level
2357 * @cdev - pointer to embedded class device
2358 * @buf - the buffer returned
2359 *
2360 * A sysfs 'read/write' shost attribute.
2361 */
2362static ssize_t
2363_ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
2364 char *buf)
2365{
2366 struct Scsi_Host *shost = class_to_shost(cdev);
2367 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2368
2369 return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
2370}
2371static ssize_t
2372_ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
2373 const char *buf, size_t count)
2374{
2375 struct Scsi_Host *shost = class_to_shost(cdev);
2376 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2377 int val = 0;
2378
2379 if (sscanf(buf, "%x", &val) != 1)
2380 return -EINVAL;
2381
2382 ioc->logging_level = val;
2383 printk(MPT2SAS_INFO_FMT "logging_level=%08xh\n", ioc->name,
2384 ioc->logging_level);
2385 return strlen(buf);
2386}
2387static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR,
2388 _ctl_logging_level_show, _ctl_logging_level_store);
2389
/* Shost-level sysfs attributes exported by this driver; consumed by the
 * scsi host template.  NULL-terminated. */
struct device_attribute *mpt2sas_host_attrs[] = {
	&dev_attr_version_fw,
	&dev_attr_version_bios,
	&dev_attr_version_mpi,
	&dev_attr_version_product,
	&dev_attr_version_nvdata_persistent,
	&dev_attr_version_nvdata_default,
	&dev_attr_board_name,
	&dev_attr_board_assembly,
	&dev_attr_board_tracer,
	&dev_attr_io_delay,
	&dev_attr_device_delay,
	&dev_attr_logging_level,
	&dev_attr_fw_queue_depth,
	&dev_attr_host_sas_address,
	NULL,
};
2407
2408/* device attributes */
2409
2410/**
2411 * _ctl_device_sas_address_show - sas address
2412 * @cdev - pointer to embedded class device
2413 * @buf - the buffer returned
2414 *
2415 * This is the sas address for the target
2416 *
2417 * A sysfs 'read-only' shost attribute.
2418 */
2419static ssize_t
2420_ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr,
2421 char *buf)
2422{
2423 struct scsi_device *sdev = to_scsi_device(dev);
2424 struct MPT2SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
2425
2426 return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
2427 (unsigned long long)sas_device_priv_data->sas_target->sas_address);
2428}
2429static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
2430
2431/**
2432 * _ctl_device_handle_show - device handle
2433 * @cdev - pointer to embedded class device
2434 * @buf - the buffer returned
2435 *
2436 * This is the firmware assigned device handle
2437 *
2438 * A sysfs 'read-only' shost attribute.
2439 */
2440static ssize_t
2441_ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
2442 char *buf)
2443{
2444 struct scsi_device *sdev = to_scsi_device(dev);
2445 struct MPT2SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
2446
2447 return snprintf(buf, PAGE_SIZE, "0x%04x\n",
2448 sas_device_priv_data->sas_target->handle);
2449}
2450static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
2451
/* Per-scsi-device sysfs attributes; consumed by the scsi host template.
 * NULL-terminated. */
struct device_attribute *mpt2sas_dev_attrs[] = {
	&dev_attr_sas_address,
	&dev_attr_sas_device_handle,
	NULL,
};
2457
/* file operations for the /dev/mpt2ctl misc character device */
static const struct file_operations ctl_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = _ctl_ioctl,
	.release = _ctl_release,
	.poll = _ctl_poll,
	.fasync = _ctl_fasync,
#ifdef CONFIG_COMPAT
	.compat_ioctl = _ctl_ioctl_compat,
#endif
};
2468
/* misc device node through which management applications reach the driver */
static struct miscdevice ctl_dev = {
	.minor = MPT2SAS_MINOR,
	.name = MPT2SAS_DEV_NAME,
	.fops = &ctl_fops,
};
2474
2475/**
2476 * mpt2sas_ctl_init - main entry point for ctl.
2477 *
2478 */
2479void
2480mpt2sas_ctl_init(void)
2481{
2482 async_queue = NULL;
2483 if (misc_register(&ctl_dev) < 0)
2484 printk(KERN_ERR "%s can't register misc device [minor=%d]\n",
2485 MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR);
2486
2487 init_waitqueue_head(&ctl_poll_wait);
2488}
2489
2490/**
2491 * mpt2sas_ctl_exit - exit point for ctl
2492 *
2493 */
2494void
2495mpt2sas_ctl_exit(void)
2496{
2497 struct MPT2SAS_ADAPTER *ioc;
2498 int i;
2499
2500 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
2501
2502 /* free memory associated to diag buffers */
2503 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
2504 if (!ioc->diag_buffer[i])
2505 continue;
2506 pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
2507 ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
2508 ioc->diag_buffer[i] = NULL;
2509 ioc->diag_buffer_status[i] = 0;
2510 }
2511
2512 kfree(ioc->event_log);
2513 }
2514 misc_deregister(&ctl_dev);
2515}
2516
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
new file mode 100644
index 000000000000..dbb6c0cf8889
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -0,0 +1,416 @@
1/*
2 * Management Module Support for MPT (Message Passing Technology) based
3 * controllers
4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
6 * Copyright (C) 2007-2008 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
42 * USA.
43 */
44
#ifndef MPT2SAS_CTL_H_INCLUDED
#define MPT2SAS_CTL_H_INCLUDED

#ifdef __KERNEL__
#include <linux/miscdevice.h>
#endif

/* name of the misc char device node exposed to userspace */
#define MPT2SAS_DEV_NAME "mpt2ctl"
/* ioctl magic shared by all opcodes below */
#define MPT2_MAGIC_NUMBER 'm'
#define MPT2_IOCTL_DEFAULT_TIMEOUT (10) /* in seconds */

/**
 * IOCTL opcodes
 */
#define MPT2IOCINFO _IOWR(MPT2_MAGIC_NUMBER, 17, \
    struct mpt2_ioctl_iocinfo)
#define MPT2COMMAND _IOWR(MPT2_MAGIC_NUMBER, 20, \
    struct mpt2_ioctl_command)
#ifdef CONFIG_COMPAT
/* same opcode number (20) as MPT2COMMAND, but with the 32-bit layout */
#define MPT2COMMAND32 _IOWR(MPT2_MAGIC_NUMBER, 20, \
    struct mpt2_ioctl_command32)
#endif
#define MPT2EVENTQUERY _IOWR(MPT2_MAGIC_NUMBER, 21, \
    struct mpt2_ioctl_eventquery)
#define MPT2EVENTENABLE _IOWR(MPT2_MAGIC_NUMBER, 22, \
    struct mpt2_ioctl_eventenable)
#define MPT2EVENTREPORT _IOWR(MPT2_MAGIC_NUMBER, 23, \
    struct mpt2_ioctl_eventreport)
#define MPT2HARDRESET _IOWR(MPT2_MAGIC_NUMBER, 24, \
    struct mpt2_ioctl_diag_reset)
#define MPT2BTDHMAPPING _IOWR(MPT2_MAGIC_NUMBER, 31, \
    struct mpt2_ioctl_btdh_mapping)

/* diag buffer support */
#define MPT2DIAGREGISTER _IOWR(MPT2_MAGIC_NUMBER, 26, \
    struct mpt2_diag_register)
#define MPT2DIAGRELEASE _IOWR(MPT2_MAGIC_NUMBER, 27, \
    struct mpt2_diag_release)
#define MPT2DIAGUNREGISTER _IOWR(MPT2_MAGIC_NUMBER, 28, \
    struct mpt2_diag_unregister)
#define MPT2DIAGQUERY _IOWR(MPT2_MAGIC_NUMBER, 29, \
    struct mpt2_diag_query)
#define MPT2DIAGREADBUFFER _IOWR(MPT2_MAGIC_NUMBER, 30, \
    struct mpt2_diag_read_buffer)
89
/**
 * struct mpt2_ioctl_header - main header structure
 * @ioc_number - IOC unit number
 * @port_number - IOC port number
 * @max_data_size - maximum number bytes to transfer on read
 */
struct mpt2_ioctl_header {
	uint32_t ioc_number;
	uint32_t port_number;
	uint32_t max_data_size;
};

/**
 * struct mpt2_ioctl_diag_reset - diagnostic reset
 * @hdr - generic header
 */
struct mpt2_ioctl_diag_reset {
	struct mpt2_ioctl_header hdr;
};


/**
 * struct mpt2_ioctl_pci_info - pci device info
 * @device - pci device id
 * @function - pci function id
 * @bus - pci bus id
 * @segment_id - pci segment id
 */
struct mpt2_ioctl_pci_info {
	union {
		struct {
			uint32_t device:5;
			uint32_t function:3;
			uint32_t bus:24;
		} bits;
		uint32_t word;	/* same 32 bits viewed as one dword */
	} u;
	uint32_t segment_id;
};


/* values for mpt2_ioctl_iocinfo.adapter_type */
#define MPT2_IOCTL_INTERFACE_SCSI	(0x00)
#define MPT2_IOCTL_INTERFACE_FC		(0x01)
#define MPT2_IOCTL_INTERFACE_FC_IP	(0x02)
#define MPT2_IOCTL_INTERFACE_SAS	(0x03)
#define MPT2_IOCTL_INTERFACE_SAS2	(0x04)
#define MPT2_IOCTL_VERSION_LENGTH	(32)

/**
 * struct mpt2_ioctl_iocinfo - generic controller info
 * @hdr - generic header
 * @adapter_type - type of adapter (spi, fc, sas)
 * @port_number - port number
 * @pci_id - PCI Id
 * @hw_rev - hardware revision
 * @subsystem_device - PCI subsystem Device ID
 * @subsystem_vendor - PCI subsystem Vendor ID
 * @rsvd0 - reserved
 * @firmware_version - firmware version
 * @bios_version - BIOS version
 * @driver_version - driver version - 32 ASCII characters
 * @rsvd1 - reserved
 * @scsi_id - scsi id of adapter 0
 * @rsvd2 - reserved
 * @pci_information - pci info (2nd revision)
 */
struct mpt2_ioctl_iocinfo {
	struct mpt2_ioctl_header hdr;
	uint32_t adapter_type;
	uint32_t port_number;
	uint32_t pci_id;
	uint32_t hw_rev;
	uint32_t subsystem_device;
	uint32_t subsystem_vendor;
	uint32_t rsvd0;
	uint32_t firmware_version;
	uint32_t bios_version;
	uint8_t driver_version[MPT2_IOCTL_VERSION_LENGTH];
	uint8_t rsvd1;
	uint8_t scsi_id;
	uint16_t rsvd2;
	struct mpt2_ioctl_pci_info pci_information;
};


/* number of event log entries */
#define MPT2SAS_CTL_EVENT_LOG_SIZE (50)

/**
 * struct mpt2_ioctl_eventquery - query event count and type
 * @hdr - generic header
 * @event_entries - number of events returned by get_event_report
 * @rsvd - reserved
 * @event_types - type of events currently being captured
 */
struct mpt2_ioctl_eventquery {
	struct mpt2_ioctl_header hdr;
	uint16_t event_entries;
	uint16_t rsvd;
	uint32_t event_types[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
};
191
/**
 * struct mpt2_ioctl_eventenable - enable/disable event capturing
 * @hdr - generic header
 * @event_types - toggle off/on type of events to be captured
 *
 * NOTE(review): the array is sized 4 here while mpt2_ioctl_eventquery
 * uses MPI2_EVENT_NOTIFY_EVENTMASK_WORDS — confirm these agree.
 */
struct mpt2_ioctl_eventenable {
	struct mpt2_ioctl_header hdr;
	uint32_t event_types[4];
};

#define MPT2_EVENT_DATA_SIZE (192)
/**
 * struct MPT2_IOCTL_EVENTS -
 * @event - the event that was reported
 * @context - unique value for each event assigned by driver
 * @data - event data returned in fw reply message
 */
struct MPT2_IOCTL_EVENTS {
	uint32_t event;
	uint32_t context;
	uint8_t data[MPT2_EVENT_DATA_SIZE];
};

/**
 * struct mpt2_ioctl_eventreport - returning event log
 * @hdr - generic header
 * @event_data - (see struct MPT2_IOCTL_EVENTS)
 *
 * event_data is a variable-length trailer; [1] is the pre-C99 idiom
 * for a flexible array member.
 */
struct mpt2_ioctl_eventreport {
	struct mpt2_ioctl_header hdr;
	struct MPT2_IOCTL_EVENTS event_data[1];
};

/**
 * struct mpt2_ioctl_command - generic mpt firmware passthru ioctl
 * @hdr - generic header
 * @timeout - command timeout in seconds. (if zero then use driver default
 *  value).
 * @reply_frame_buf_ptr - reply location
 * @data_in_buf_ptr - destination for read
 * @data_out_buf_ptr - data source for write
 * @sense_data_ptr - sense data location
 * @max_reply_bytes - maximum number of reply bytes to be sent to app.
 * @data_in_size - number bytes for data transfer in (read)
 * @data_out_size - number bytes for data transfer out (write)
 * @max_sense_bytes - maximum number of bytes for auto sense buffers
 * @data_sge_offset - offset in words from the start of the request message to
 *  the first SGL
 * @mf - the message frame payload (variable length trailer)
 */
struct mpt2_ioctl_command {
	struct mpt2_ioctl_header hdr;
	uint32_t timeout;
	void __user *reply_frame_buf_ptr;
	void __user *data_in_buf_ptr;
	void __user *data_out_buf_ptr;
	void __user *sense_data_ptr;
	uint32_t max_reply_bytes;
	uint32_t data_in_size;
	uint32_t data_out_size;
	uint32_t max_sense_bytes;
	uint32_t data_sge_offset;
	uint8_t mf[1];
};

#ifdef CONFIG_COMPAT
/* 32-bit layout of mpt2_ioctl_command: user pointers shrink to u32 */
struct mpt2_ioctl_command32 {
	struct mpt2_ioctl_header hdr;
	uint32_t timeout;
	uint32_t reply_frame_buf_ptr;
	uint32_t data_in_buf_ptr;
	uint32_t data_out_buf_ptr;
	uint32_t sense_data_ptr;
	uint32_t max_reply_bytes;
	uint32_t data_in_size;
	uint32_t data_out_size;
	uint32_t max_sense_bytes;
	uint32_t data_sge_offset;
	uint8_t mf[1];
};
#endif

/**
 * struct mpt2_ioctl_btdh_mapping - mapping info
 * @hdr - generic header
 * @id - target device identification number
 * @bus - SCSI bus number that the target device exists on
 * @handle - device handle for the target device
 * @rsvd - reserved
 *
 * To obtain a bus/id the application sets
 * handle to valid handle, and bus/id to 0xFFFF.
 *
 * To obtain the device handle the application sets
 * bus/id valid value, and the handle to 0xFFFF.
 */
struct mpt2_ioctl_btdh_mapping {
	struct mpt2_ioctl_header hdr;
	uint32_t id;
	uint32_t bus;
	uint16_t handle;
	uint16_t rsvd;
};
295
296
/* status bits for ioc->diag_buffer_status */
#define MPT2_DIAG_BUFFER_IS_REGISTERED	(0x01)
#define MPT2_DIAG_BUFFER_IS_RELEASED	(0x02)

/* application flags for mpt2_diag_register, mpt2_diag_query */
#define MPT2_APP_FLAGS_APP_OWNED	(0x0001)
#define MPT2_APP_FLAGS_BUFFER_VALID	(0x0002)
#define MPT2_APP_FLAGS_FW_BUFFER_ACCESS	(0x0004)

/* flags for mpt2_diag_read_buffer */
#define MPT2_FLAGS_REREGISTER		(0x0001)

#define MPT2_PRODUCT_SPECIFIC_DWORDS	23

/**
 * struct mpt2_diag_register - application register with driver
 * @hdr - generic header
 * @reserved -
 * @buffer_type - specifies either TRACE or SNAPSHOT
 * @application_flags - misc flags
 * @diagnostic_flags - specifies flags affecting command processing
 * @product_specific - product specific information
 * @requested_buffer_size - buffers size in bytes
 * @unique_id - tag specified by application that is used to signal ownership
 *  of the buffer.
 *
 * This will allow the driver to setup any required buffers that will be
 * needed by firmware to communicate with the driver.
 */
struct mpt2_diag_register {
	struct mpt2_ioctl_header hdr;
	uint8_t reserved;
	uint8_t buffer_type;
	uint16_t application_flags;
	uint32_t diagnostic_flags;
	uint32_t product_specific[MPT2_PRODUCT_SPECIFIC_DWORDS];
	uint32_t requested_buffer_size;
	uint32_t unique_id;
};

/**
 * struct mpt2_diag_unregister - application unregister with driver
 * @hdr - generic header
 * @unique_id - tag uniquely identifies the buffer to be unregistered
 *
 * This will allow the driver to cleanup any memory allocated for diag
 * messages and to free up any resources.
 */
struct mpt2_diag_unregister {
	struct mpt2_ioctl_header hdr;
	uint32_t unique_id;
};

/**
 * struct mpt2_diag_query - query relevant info associated with diag buffers
 * @hdr - generic header
 * @reserved -
 * @buffer_type - specifies either TRACE or SNAPSHOT
 * @application_flags - misc flags
 * @diagnostic_flags - specifies flags affecting command processing
 * @product_specific - product specific information
 * @total_buffer_size - diag buffer size in bytes
 * @driver_added_buffer_size - size of extra space appended to end of buffer
 * @unique_id - unique id associated with this buffer.
 *
 * The application will send only buffer_type and unique_id.  Driver will
 * inspect unique_id first, if valid, fill in all the info.  If unique_id is
 * 0x00, the driver will return info specified by Buffer Type.
 */
struct mpt2_diag_query {
	struct mpt2_ioctl_header hdr;
	uint8_t reserved;
	uint8_t buffer_type;
	uint16_t application_flags;
	uint32_t diagnostic_flags;
	uint32_t product_specific[MPT2_PRODUCT_SPECIFIC_DWORDS];
	uint32_t total_buffer_size;
	uint32_t driver_added_buffer_size;
	uint32_t unique_id;
};

/**
 * struct mpt2_diag_release - request to send Diag Release Message to firmware
 * @hdr - generic header
 * @unique_id - tag uniquely identifies the buffer to be released
 *
 * This allows ownership of the specified buffer to be returned to the driver,
 * allowing an application to read the buffer without fear that firmware is
 * overwriting information in the buffer.
 */
struct mpt2_diag_release {
	struct mpt2_ioctl_header hdr;
	uint32_t unique_id;
};

/**
 * struct mpt2_diag_read_buffer - request for copy of the diag buffer
 * @hdr - generic header
 * @status -
 * @reserved -
 * @flags - misc flags
 * @starting_offset - starting offset within drivers buffer where to start
 *  reading data at into the specified application buffer
 * @bytes_to_read - number of bytes to copy from the drivers buffer into the
 *  application buffer starting at starting_offset.
 * @unique_id - unique id associated with this buffer.
 * @diagnostic_data - data payload (variable length trailer)
 */
struct mpt2_diag_read_buffer {
	struct mpt2_ioctl_header hdr;
	uint8_t status;
	uint8_t reserved;
	uint16_t flags;
	uint32_t starting_offset;
	uint32_t bytes_to_read;
	uint32_t unique_id;
	uint32_t diagnostic_data[1];
};

#endif /* MPT2SAS_CTL_H_INCLUDED */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
new file mode 100644
index 000000000000..ad325096e842
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -0,0 +1,181 @@
1/*
2 * Logging Support for MPT (Message Passing Technology) based controllers
3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
5 * Copyright (C) 2007-2008 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#ifndef MPT2SAS_DEBUG_H_INCLUDED
45#define MPT2SAS_DEBUG_H_INCLUDED
46
47#define MPT_DEBUG 0x00000001
48#define MPT_DEBUG_MSG_FRAME 0x00000002
49#define MPT_DEBUG_SG 0x00000004
50#define MPT_DEBUG_EVENTS 0x00000008
51#define MPT_DEBUG_EVENT_WORK_TASK 0x00000010
52#define MPT_DEBUG_INIT 0x00000020
53#define MPT_DEBUG_EXIT 0x00000040
54#define MPT_DEBUG_FAIL 0x00000080
55#define MPT_DEBUG_TM 0x00000100
56#define MPT_DEBUG_REPLY 0x00000200
57#define MPT_DEBUG_HANDSHAKE 0x00000400
58#define MPT_DEBUG_CONFIG 0x00000800
59#define MPT_DEBUG_DL 0x00001000
60#define MPT_DEBUG_RESET 0x00002000
61#define MPT_DEBUG_SCSI 0x00004000
62#define MPT_DEBUG_IOCTL 0x00008000
63#define MPT_DEBUG_CSMISAS 0x00010000
64#define MPT_DEBUG_SAS 0x00020000
65#define MPT_DEBUG_TRANSPORT 0x00040000
66#define MPT_DEBUG_TASK_SET_FULL 0x00080000
67
68#define MPT_DEBUG_TARGET_MODE 0x00100000
69
70
71/*
72 * CONFIG_SCSI_MPT2SAS_LOGGING - enabled in Kconfig
73 */
74
75#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
76#define MPT_CHECK_LOGGING(IOC, CMD, BITS) \
77{ \
78 if (IOC->logging_level & BITS) \
79 CMD; \
80}
81#else
82#define MPT_CHECK_LOGGING(IOC, CMD, BITS)
83#endif /* CONFIG_SCSI_MPT2SAS_LOGGING */
84
85
/*
 * debug macros
 *
 * Each wrapper runs CMD (typically a printk() call) only when the named
 * MPT_DEBUG_* bit is set in IOC->logging_level.
 */

#define dprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG)

#define dsgprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SG)

#define devtprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENTS)

#define dewtprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENT_WORK_TASK)

#define dinitprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_INIT)

#define dexitprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EXIT)

#define dfailprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_FAIL)

#define dtmprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TM)

#define dreplyprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_REPLY)

#define dhsprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_HANDSHAKE)

#define dcprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_CONFIG)

#define ddlprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_DL)

#define drsprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_RESET)

#define dsprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SCSI)

#define dctlprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_IOCTL)

#define dcsmisasprintk(IOC, CMD)		\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_CSMISAS)

#define dsasprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS)

/* NOTE(review): MPT_DEBUG_SAS_WIDE is never defined in this header, so
 * dsastransport() cannot compile if ever used with logging enabled —
 * confirm the intended bit (MPT_DEBUG_TRANSPORT?). */
#define dsastransport(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE)

#define dmfprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_MSG_FRAME)

#define dtsfprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TASK_SET_FULL)

#define dtransportprintk(IOC, CMD)		\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRANSPORT)

#define dTMprintk(IOC, CMD)			\
	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TARGET_MODE)
156/* inline functions for dumping debug data*/
157#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
158/**
159 * _debug_dump_mf - print message frame contents
160 * @mpi_request: pointer to message frame
161 * @sz: number of dwords
162 */
163static inline void
164_debug_dump_mf(void *mpi_request, int sz)
165{
166 int i;
167 u32 *mfp = (u32 *)mpi_request;
168
169 printk(KERN_INFO "mf:\n\t");
170 for (i = 0; i < sz; i++) {
171 if (i && ((i % 8) == 0))
172 printk("\n\t");
173 printk("%08x ", le32_to_cpu(mfp[i]));
174 }
175 printk("\n");
176}
177#else
178#define _debug_dump_mf(mpi_request, sz)
179#endif /* CONFIG_SCSI_MPT2SAS_LOGGING */
180
181#endif /* MPT2SAS_DEBUG_H_INCLUDED */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
new file mode 100644
index 000000000000..0c463c483c02
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -0,0 +1,5687 @@
1/*
2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
5 * Copyright (C) 2007-2008 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include <linux/version.h>
45#include <linux/module.h>
46#include <linux/kernel.h>
47#include <linux/init.h>
48#include <linux/errno.h>
49#include <linux/blkdev.h>
50#include <linux/sched.h>
51#include <linux/workqueue.h>
52#include <linux/delay.h>
53#include <linux/pci.h>
54#include <linux/interrupt.h>
55
56#include "mpt2sas_base.h"
57
58MODULE_AUTHOR(MPT2SAS_AUTHOR);
59MODULE_DESCRIPTION(MPT2SAS_DESCRIPTION);
60MODULE_LICENSE("GPL");
61MODULE_VERSION(MPT2SAS_DRIVER_VERSION);
62
#define RAID_CHANNEL 1

/* forward proto's */
static void _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
    struct _sas_node *sas_expander);
static void _firmware_event_work(struct work_struct *work);

/* global parameters */
LIST_HEAD(mpt2sas_ioc_list);

/* local parameters: callback indexes handed out by the base driver;
 * -1 on a u8 yields 0xff, the "not assigned" sentinel */
static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
static u8 ctl_cb_idx = -1;
static u8 base_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 config_cb_idx = -1;
static int mpt_ids;	/* monotonically increasing adapter id */

/* command line options; the module_param_call() for logging_level is
 * registered further down, next to scsih_set_debug_level() */
static u32 logging_level;
MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info "
    "(default=0)");

/* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPT2SAS_MAX_LUN (16895)
static int max_lun = MPT2SAS_MAX_LUN;
module_param(max_lun, int, 0);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");

/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 */
struct sense_info {
	u8 skey;
	u8 asc;
	u8 ascq;
};
104
105
#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
/**
 * struct fw_event_work - firmware event struct
 * @list: link list framework
 * @work: work object (ioc->fault_reset_work_q)
 * @ioc: per adapter object
 * @VF_ID: virtual function id
 * @host_reset_handling: handling events during host reset
 * @ignore: flag meaning this event has been marked to ignore
 * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h
 * @event_data: reply event data payload follows
 *
 * This object stored on ioc->fw_event_list.
 */
struct fw_event_work {
	struct list_head list;
	struct delayed_work work;
	struct MPT2SAS_ADAPTER *ioc;
	u8 VF_ID;
	u8 host_reset_handling;
	u8 ignore;
	u16 event;
	void *event_data;	/* ownership/free semantics not visible here */
};
130
/**
 * struct _scsi_io_transfer - scsi io transfer
 * @handle: sas device handle (assigned by firmware)
 * @is_raid: flag set for hidden raid components
 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
 * @data_length: data transfer length
 * @data_dma: dma pointer to data
 * @sense: sense data
 * @lun: lun number
 * @cdb_length: cdb length
 * @cdb: cdb contents
 * @valid_reply: flag set for reply message
 * @timeout: timeout for this command
 * @sense_length: sense length
 * @ioc_status: ioc status
 * @scsi_state: scsi state
 * @scsi_status: scsi status
 * @log_info: log information
 * @transfer_length: data length transfer when there is a reply message
 *
 * Used for sending internal scsi commands to devices within this module.
 * Refer to _scsi_send_scsi_io().
 */
struct _scsi_io_transfer {
	u16 handle;
	u8 is_raid;
	enum dma_data_direction dir;
	u32 data_length;
	dma_addr_t data_dma;
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	u32 lun;
	u8 cdb_length;
	u8 cdb[32];
	u8 timeout;
	u8 valid_reply;
	/* the following bits are only valid when 'valid_reply = 1' */
	u32 sense_length;
	u16 ioc_status;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	u32 transfer_length;
};
174
175/*
176 * The pci device ids are defined in mpi/mpi2_cnfg.h.
177 */
178static struct pci_device_id scsih_pci_table[] = {
179 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
180 PCI_ANY_ID, PCI_ANY_ID },
181 /* Falcon ~ 2008*/
182 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
183 PCI_ANY_ID, PCI_ANY_ID },
184 /* Liberator ~ 2108 */
185 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
186 PCI_ANY_ID, PCI_ANY_ID },
187 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
188 PCI_ANY_ID, PCI_ANY_ID },
189 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
190 PCI_ANY_ID, PCI_ANY_ID },
191 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
192 PCI_ANY_ID, PCI_ANY_ID },
193 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
194 PCI_ANY_ID, PCI_ANY_ID },
195 {0} /* Terminating entry */
196};
197MODULE_DEVICE_TABLE(pci, scsih_pci_table);
198
199/**
200 * scsih_set_debug_level - global setting of ioc->logging_level.
201 *
202 * Note: The logging levels are defined in mpt2sas_debug.h.
203 */
204static int
205scsih_set_debug_level(const char *val, struct kernel_param *kp)
206{
207 int ret = param_set_int(val, kp);
208 struct MPT2SAS_ADAPTER *ioc;
209
210 if (ret)
211 return ret;
212
213 printk(KERN_INFO "setting logging_level(0x%08x)\n", logging_level);
214 list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
215 ioc->logging_level = logging_level;
216 return 0;
217}
218module_param_call(logging_level, scsih_set_debug_level, param_get_int,
219 &logging_level, 0644);
220
221/**
222 * _scsih_srch_boot_sas_address - search based on sas_address
223 * @sas_address: sas address
224 * @boot_device: boot device object from bios page 2
225 *
226 * Returns 1 when there's a match, 0 means no match.
227 */
228static inline int
229_scsih_srch_boot_sas_address(u64 sas_address,
230 Mpi2BootDeviceSasWwid_t *boot_device)
231{
232 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
233}
234
235/**
236 * _scsih_srch_boot_device_name - search based on device name
237 * @device_name: device name specified in INDENTIFY fram
238 * @boot_device: boot device object from bios page 2
239 *
240 * Returns 1 when there's a match, 0 means no match.
241 */
242static inline int
243_scsih_srch_boot_device_name(u64 device_name,
244 Mpi2BootDeviceDeviceName_t *boot_device)
245{
246 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
247}
248
249/**
250 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
251 * @enclosure_logical_id: enclosure logical id
252 * @slot_number: slot number
253 * @boot_device: boot device object from bios page 2
254 *
255 * Returns 1 when there's a match, 0 means no match.
256 */
257static inline int
258_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
259 Mpi2BootDeviceEnclosureSlot_t *boot_device)
260{
261 return (enclosure_logical_id == le64_to_cpu(boot_device->
262 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
263 SlotNumber)) ? 1 : 0;
264}
265
266/**
267 * _scsih_is_boot_device - search for matching boot device.
268 * @sas_address: sas address
269 * @device_name: device name specified in INDENTIFY fram
270 * @enclosure_logical_id: enclosure logical id
271 * @slot_number: slot number
272 * @form: specifies boot device form
273 * @boot_device: boot device object from bios page 2
274 *
275 * Returns 1 when there's a match, 0 means no match.
276 */
277static int
278_scsih_is_boot_device(u64 sas_address, u64 device_name,
279 u64 enclosure_logical_id, u16 slot, u8 form,
280 Mpi2BiosPage2BootDevice_t *boot_device)
281{
282 int rc = 0;
283
284 switch (form) {
285 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
286 if (!sas_address)
287 break;
288 rc = _scsih_srch_boot_sas_address(
289 sas_address, &boot_device->SasWwid);
290 break;
291 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
292 if (!enclosure_logical_id)
293 break;
294 rc = _scsih_srch_boot_encl_slot(
295 enclosure_logical_id,
296 slot, &boot_device->EnclosureSlot);
297 break;
298 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
299 if (!device_name)
300 break;
301 rc = _scsih_srch_boot_device_name(
302 device_name, &boot_device->DeviceName);
303 break;
304 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
305 break;
306 }
307
308 return rc;
309}
310
/**
 * _scsih_determine_boot_device - determine boot device.
 * @ioc: per adapter object
 * @device: either sas_device or raid_device object
 * @is_raid: [flag] 1 = raid object, 0 = sas object
 *
 * Determines whether this device should be first reported device to
 * to scsi-ml or sas transport, this purpose is for persistent boot device.
 * There are primary, alternate, and current entries in bios page 2. The order
 * priority is primary, alternate, then current.  This routine saves
 * the corresponding device object and is_raid flag in the ioc object.
 * The saved data to be used later in _scsih_probe_boot_devices().
 */
static void
_scsih_determine_boot_device(struct MPT2SAS_ADAPTER *ioc,
    void *device, u8 is_raid)
{
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	u64 sas_address;
	u64 device_name;
	u64 enclosure_logical_id;
	u16 slot;

	/* only process this function when driver loads */
	if (!ioc->wait_for_port_enable_to_complete)
		return;

	/* extract the lookup keys from whichever object type was passed;
	 * raid volumes only carry a wwid, the other keys are zeroed so
	 * they can never match in _scsih_is_boot_device() */
	if (!is_raid) {
		sas_device = device;
		sas_address = sas_device->sas_address;
		device_name = sas_device->device_name;
		enclosure_logical_id = sas_device->enclosure_logical_id;
		slot = sas_device->slot;
	} else {
		raid_device = device;
		sas_address = raid_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	}

	/* each slot is latched on first match only (!...device checks),
	 * preserving the first device reported during port enable */
	if (!ioc->req_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
			dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT
			   "%s: req_boot_device(0x%016llx)\n",
			    ioc->name, __func__,
			    (unsigned long long)sas_address));
			ioc->req_boot_device.device = device;
			ioc->req_boot_device.is_raid = is_raid;
		}
	}

	if (!ioc->req_alt_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
			dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT
			   "%s: req_alt_boot_device(0x%016llx)\n",
			    ioc->name, __func__,
			    (unsigned long long)sas_address));
			ioc->req_alt_boot_device.device = device;
			ioc->req_alt_boot_device.is_raid = is_raid;
		}
	}

	if (!ioc->current_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
			dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT
			   "%s: current_boot_device(0x%016llx)\n",
			    ioc->name, __func__,
			    (unsigned long long)sas_address));
			ioc->current_boot_device.device = device;
			ioc->current_boot_device.is_raid = is_raid;
		}
	}
}
398
399/**
400 * mpt2sas_scsih_sas_device_find_by_sas_address - sas device search
401 * @ioc: per adapter object
402 * @sas_address: sas address
403 * Context: Calling function should acquire ioc->sas_device_lock
404 *
405 * This searches for sas_device based on sas_address, then return sas_device
406 * object.
407 */
408struct _sas_device *
409mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
410 u64 sas_address)
411{
412 struct _sas_device *sas_device, *r;
413
414 r = NULL;
415 /* check the sas_device_init_list */
416 list_for_each_entry(sas_device, &ioc->sas_device_init_list,
417 list) {
418 if (sas_device->sas_address != sas_address)
419 continue;
420 r = sas_device;
421 goto out;
422 }
423
424 /* then check the sas_device_list */
425 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
426 if (sas_device->sas_address != sas_address)
427 continue;
428 r = sas_device;
429 goto out;
430 }
431 out:
432 return r;
433}
434
435/**
436 * _scsih_sas_device_find_by_handle - sas device search
437 * @ioc: per adapter object
438 * @handle: sas device handle (assigned by firmware)
439 * Context: Calling function should acquire ioc->sas_device_lock
440 *
441 * This searches for sas_device based on sas_address, then return sas_device
442 * object.
443 */
444static struct _sas_device *
445_scsih_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
446{
447 struct _sas_device *sas_device, *r;
448
449 r = NULL;
450 if (ioc->wait_for_port_enable_to_complete) {
451 list_for_each_entry(sas_device, &ioc->sas_device_init_list,
452 list) {
453 if (sas_device->handle != handle)
454 continue;
455 r = sas_device;
456 goto out;
457 }
458 } else {
459 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
460 if (sas_device->handle != handle)
461 continue;
462 r = sas_device;
463 goto out;
464 }
465 }
466
467 out:
468 return r;
469}
470
471/**
472 * _scsih_sas_device_remove - remove sas_device from list.
473 * @ioc: per adapter object
474 * @sas_device: the sas_device object
475 * Context: This function will acquire ioc->sas_device_lock.
476 *
477 * Removing object and freeing associated memory from the ioc->sas_device_list.
478 */
479static void
480_scsih_sas_device_remove(struct MPT2SAS_ADAPTER *ioc,
481 struct _sas_device *sas_device)
482{
483 unsigned long flags;
484
485 spin_lock_irqsave(&ioc->sas_device_lock, flags);
486 list_del(&sas_device->list);
487 memset(sas_device, 0, sizeof(struct _sas_device));
488 kfree(sas_device);
489 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
490}
491
492/**
493 * _scsih_sas_device_add - insert sas_device to the list.
494 * @ioc: per adapter object
495 * @sas_device: the sas_device object
496 * Context: This function will acquire ioc->sas_device_lock.
497 *
498 * Adding new object to the ioc->sas_device_list.
499 */
500static void
501_scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
502 struct _sas_device *sas_device)
503{
504 unsigned long flags;
505 u16 handle, parent_handle;
506 u64 sas_address;
507
508 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: handle"
509 "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
510 sas_device->handle, (unsigned long long)sas_device->sas_address));
511
512 spin_lock_irqsave(&ioc->sas_device_lock, flags);
513 list_add_tail(&sas_device->list, &ioc->sas_device_list);
514 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
515
516 handle = sas_device->handle;
517 parent_handle = sas_device->parent_handle;
518 sas_address = sas_device->sas_address;
519 if (!mpt2sas_transport_port_add(ioc, handle, parent_handle)) {
520 _scsih_sas_device_remove(ioc, sas_device);
521 } else if (!sas_device->starget) {
522 mpt2sas_transport_port_remove(ioc, sas_address, parent_handle);
523 _scsih_sas_device_remove(ioc, sas_device);
524 }
525}
526
527/**
528 * _scsih_sas_device_init_add - insert sas_device to the list.
529 * @ioc: per adapter object
530 * @sas_device: the sas_device object
531 * Context: This function will acquire ioc->sas_device_lock.
532 *
533 * Adding new object at driver load time to the ioc->sas_device_init_list.
534 */
535static void
536_scsih_sas_device_init_add(struct MPT2SAS_ADAPTER *ioc,
537 struct _sas_device *sas_device)
538{
539 unsigned long flags;
540
541 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: handle"
542 "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
543 sas_device->handle, (unsigned long long)sas_device->sas_address));
544
545 spin_lock_irqsave(&ioc->sas_device_lock, flags);
546 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
547 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
548 _scsih_determine_boot_device(ioc, sas_device, 0);
549}
550
551/**
552 * mpt2sas_scsih_expander_find_by_handle - expander device search
553 * @ioc: per adapter object
554 * @handle: expander handle (assigned by firmware)
555 * Context: Calling function should acquire ioc->sas_device_lock
556 *
557 * This searches for expander device based on handle, then returns the
558 * sas_node object.
559 */
560struct _sas_node *
561mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
562{
563 struct _sas_node *sas_expander, *r;
564
565 r = NULL;
566 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
567 if (sas_expander->handle != handle)
568 continue;
569 r = sas_expander;
570 goto out;
571 }
572 out:
573 return r;
574}
575
576/**
577 * _scsih_raid_device_find_by_id - raid device search
578 * @ioc: per adapter object
579 * @id: sas device target id
580 * @channel: sas device channel
581 * Context: Calling function should acquire ioc->raid_device_lock
582 *
583 * This searches for raid_device based on target id, then return raid_device
584 * object.
585 */
586static struct _raid_device *
587_scsih_raid_device_find_by_id(struct MPT2SAS_ADAPTER *ioc, int id, int channel)
588{
589 struct _raid_device *raid_device, *r;
590
591 r = NULL;
592 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
593 if (raid_device->id == id && raid_device->channel == channel) {
594 r = raid_device;
595 goto out;
596 }
597 }
598
599 out:
600 return r;
601}
602
603/**
604 * _scsih_raid_device_find_by_handle - raid device search
605 * @ioc: per adapter object
606 * @handle: sas device handle (assigned by firmware)
607 * Context: Calling function should acquire ioc->raid_device_lock
608 *
609 * This searches for raid_device based on handle, then return raid_device
610 * object.
611 */
612static struct _raid_device *
613_scsih_raid_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
614{
615 struct _raid_device *raid_device, *r;
616
617 r = NULL;
618 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
619 if (raid_device->handle != handle)
620 continue;
621 r = raid_device;
622 goto out;
623 }
624
625 out:
626 return r;
627}
628
629/**
630 * _scsih_raid_device_find_by_wwid - raid device search
631 * @ioc: per adapter object
632 * @handle: sas device handle (assigned by firmware)
633 * Context: Calling function should acquire ioc->raid_device_lock
634 *
635 * This searches for raid_device based on wwid, then return raid_device
636 * object.
637 */
638static struct _raid_device *
639_scsih_raid_device_find_by_wwid(struct MPT2SAS_ADAPTER *ioc, u64 wwid)
640{
641 struct _raid_device *raid_device, *r;
642
643 r = NULL;
644 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
645 if (raid_device->wwid != wwid)
646 continue;
647 r = raid_device;
648 goto out;
649 }
650
651 out:
652 return r;
653}
654
655/**
656 * _scsih_raid_device_add - add raid_device object
657 * @ioc: per adapter object
658 * @raid_device: raid_device object
659 *
660 * This is added to the raid_device_list link list.
661 */
662static void
663_scsih_raid_device_add(struct MPT2SAS_ADAPTER *ioc,
664 struct _raid_device *raid_device)
665{
666 unsigned long flags;
667
668 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: handle"
669 "(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
670 raid_device->handle, (unsigned long long)raid_device->wwid));
671
672 spin_lock_irqsave(&ioc->raid_device_lock, flags);
673 list_add_tail(&raid_device->list, &ioc->raid_device_list);
674 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
675}
676
677/**
678 * _scsih_raid_device_remove - delete raid_device object
679 * @ioc: per adapter object
680 * @raid_device: raid_device object
681 *
682 * This is removed from the raid_device_list link list.
683 */
684static void
685_scsih_raid_device_remove(struct MPT2SAS_ADAPTER *ioc,
686 struct _raid_device *raid_device)
687{
688 unsigned long flags;
689
690 spin_lock_irqsave(&ioc->raid_device_lock, flags);
691 list_del(&raid_device->list);
692 memset(raid_device, 0, sizeof(struct _raid_device));
693 kfree(raid_device);
694 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
695}
696
697/**
698 * mpt2sas_scsih_expander_find_by_sas_address - expander device search
699 * @ioc: per adapter object
700 * @sas_address: sas address
701 * Context: Calling function should acquire ioc->sas_node_lock.
702 *
703 * This searches for expander device based on sas_address, then returns the
704 * sas_node object.
705 */
706struct _sas_node *
707mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
708 u64 sas_address)
709{
710 struct _sas_node *sas_expander, *r;
711
712 r = NULL;
713 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
714 if (sas_expander->sas_address != sas_address)
715 continue;
716 r = sas_expander;
717 goto out;
718 }
719 out:
720 return r;
721}
722
723/**
724 * _scsih_expander_node_add - insert expander device to the list.
725 * @ioc: per adapter object
726 * @sas_expander: the sas_device object
727 * Context: This function will acquire ioc->sas_node_lock.
728 *
729 * Adding new object to the ioc->sas_expander_list.
730 *
731 * Return nothing.
732 */
733static void
734_scsih_expander_node_add(struct MPT2SAS_ADAPTER *ioc,
735 struct _sas_node *sas_expander)
736{
737 unsigned long flags;
738
739 spin_lock_irqsave(&ioc->sas_node_lock, flags);
740 list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
741 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
742}
743
744/**
745 * _scsih_is_end_device - determines if device is an end device
746 * @device_info: bitfield providing information about the device.
747 * Context: none
748 *
749 * Returns 1 if end device.
750 */
751static int
752_scsih_is_end_device(u32 device_info)
753{
754 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
755 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
756 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
757 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
758 return 1;
759 else
760 return 0;
761}
762
763/**
764 * _scsih_scsi_lookup_get - returns scmd entry
765 * @ioc: per adapter object
766 * @smid: system request message index
767 * Context: This function will acquire ioc->scsi_lookup_lock.
768 *
769 * Returns the smid stored scmd pointer.
770 */
771static struct scsi_cmnd *
772_scsih_scsi_lookup_get(struct MPT2SAS_ADAPTER *ioc, u16 smid)
773{
774 unsigned long flags;
775 struct scsi_cmnd *scmd;
776
777 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
778 scmd = ioc->scsi_lookup[smid - 1].scmd;
779 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
780 return scmd;
781}
782
783/**
784 * mptscsih_getclear_scsi_lookup - returns scmd entry
785 * @ioc: per adapter object
786 * @smid: system request message index
787 * Context: This function will acquire ioc->scsi_lookup_lock.
788 *
789 * Returns the smid stored scmd pointer, as well as clearing the scmd pointer.
790 */
791static struct scsi_cmnd *
792_scsih_scsi_lookup_getclear(struct MPT2SAS_ADAPTER *ioc, u16 smid)
793{
794 unsigned long flags;
795 struct scsi_cmnd *scmd;
796
797 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
798 scmd = ioc->scsi_lookup[smid - 1].scmd;
799 ioc->scsi_lookup[smid - 1].scmd = NULL;
800 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
801 return scmd;
802}
803
804/**
805 * _scsih_scsi_lookup_set - updates scmd entry in lookup
806 * @ioc: per adapter object
807 * @smid: system request message index
808 * @scmd: pointer to scsi command object
809 * Context: This function will acquire ioc->scsi_lookup_lock.
810 *
811 * This will save scmd pointer in the scsi_lookup array.
812 *
813 * Return nothing.
814 */
815static void
816_scsih_scsi_lookup_set(struct MPT2SAS_ADAPTER *ioc, u16 smid,
817 struct scsi_cmnd *scmd)
818{
819 unsigned long flags;
820
821 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
822 ioc->scsi_lookup[smid - 1].scmd = scmd;
823 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
824}
825
826/**
827 * _scsih_scsi_lookup_find_by_scmd - scmd lookup
828 * @ioc: per adapter object
829 * @smid: system request message index
830 * @scmd: pointer to scsi command object
831 * Context: This function will acquire ioc->scsi_lookup_lock.
832 *
833 * This will search for a scmd pointer in the scsi_lookup array,
834 * returning the revelent smid. A returned value of zero means invalid.
835 */
836static u16
837_scsih_scsi_lookup_find_by_scmd(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd
838 *scmd)
839{
840 u16 smid;
841 unsigned long flags;
842 int i;
843
844 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
845 smid = 0;
846 for (i = 0; i < ioc->request_depth; i++) {
847 if (ioc->scsi_lookup[i].scmd == scmd) {
848 smid = i + 1;
849 goto out;
850 }
851 }
852 out:
853 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
854 return smid;
855}
856
857/**
858 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
859 * @ioc: per adapter object
860 * @id: target id
861 * @channel: channel
862 * Context: This function will acquire ioc->scsi_lookup_lock.
863 *
864 * This will search for a matching channel:id in the scsi_lookup array,
865 * returning 1 if found.
866 */
867static u8
868_scsih_scsi_lookup_find_by_target(struct MPT2SAS_ADAPTER *ioc, int id,
869 int channel)
870{
871 u8 found;
872 unsigned long flags;
873 int i;
874
875 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
876 found = 0;
877 for (i = 0 ; i < ioc->request_depth; i++) {
878 if (ioc->scsi_lookup[i].scmd &&
879 (ioc->scsi_lookup[i].scmd->device->id == id &&
880 ioc->scsi_lookup[i].scmd->device->channel == channel)) {
881 found = 1;
882 goto out;
883 }
884 }
885 out:
886 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
887 return found;
888}
889
890/**
891 * _scsih_get_chain_buffer_dma - obtain block of chains (dma address)
892 * @ioc: per adapter object
893 * @smid: system request message index
894 *
895 * Returns phys pointer to chain buffer.
896 */
897static dma_addr_t
898_scsih_get_chain_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
899{
900 return ioc->chain_dma + ((smid - 1) * (ioc->request_sz *
901 ioc->chains_needed_per_io));
902}
903
904/**
905 * _scsih_get_chain_buffer - obtain block of chains assigned to a mf request
906 * @ioc: per adapter object
907 * @smid: system request message index
908 *
909 * Returns virt pointer to chain buffer.
910 */
911static void *
912_scsih_get_chain_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid)
913{
914 return (void *)(ioc->chain + ((smid - 1) * (ioc->request_sz *
915 ioc->chains_needed_per_io)));
916}
917
918/**
919 * _scsih_build_scatter_gather - main sg creation routine
920 * @ioc: per adapter object
921 * @scmd: scsi command
922 * @smid: system request message index
923 * Context: none.
924 *
925 * The main routine that builds scatter gather table from a given
926 * scsi request sent via the .queuecommand main handler.
927 *
928 * Returns 0 success, anything else error
929 */
930static int
931_scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
932 struct scsi_cmnd *scmd, u16 smid)
933{
934 Mpi2SCSIIORequest_t *mpi_request;
935 dma_addr_t chain_dma;
936 struct scatterlist *sg_scmd;
937 void *sg_local, *chain;
938 u32 chain_offset;
939 u32 chain_length;
940 u32 chain_flags;
941 u32 sges_left;
942 u32 sges_in_segment;
943 u32 sgl_flags;
944 u32 sgl_flags_last_element;
945 u32 sgl_flags_end_buffer;
946
947 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
948
949 /* init scatter gather flags */
950 sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
951 if (scmd->sc_data_direction == DMA_TO_DEVICE)
952 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
953 sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
954 << MPI2_SGE_FLAGS_SHIFT;
955 sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
956 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
957 << MPI2_SGE_FLAGS_SHIFT;
958 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
959
960 sg_scmd = scsi_sglist(scmd);
961 sges_left = scsi_dma_map(scmd);
962 if (!sges_left) {
963 sdev_printk(KERN_ERR, scmd->device, "pci_map_sg"
964 " failed: request for %d bytes!\n", scsi_bufflen(scmd));
965 return -ENOMEM;
966 }
967
968 sg_local = &mpi_request->SGL;
969 sges_in_segment = ioc->max_sges_in_main_message;
970 if (sges_left <= sges_in_segment)
971 goto fill_in_last_segment;
972
973 mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
974 (sges_in_segment * ioc->sge_size))/4;
975
976 /* fill in main message segment when there is a chain following */
977 while (sges_in_segment) {
978 if (sges_in_segment == 1)
979 ioc->base_add_sg_single(sg_local,
980 sgl_flags_last_element | sg_dma_len(sg_scmd),
981 sg_dma_address(sg_scmd));
982 else
983 ioc->base_add_sg_single(sg_local, sgl_flags |
984 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
985 sg_scmd = sg_next(sg_scmd);
986 sg_local += ioc->sge_size;
987 sges_left--;
988 sges_in_segment--;
989 }
990
991 /* initializing the chain flags and pointers */
992 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
993 chain = _scsih_get_chain_buffer(ioc, smid);
994 chain_dma = _scsih_get_chain_buffer_dma(ioc, smid);
995 do {
996 sges_in_segment = (sges_left <=
997 ioc->max_sges_in_chain_message) ? sges_left :
998 ioc->max_sges_in_chain_message;
999 chain_offset = (sges_left == sges_in_segment) ?
1000 0 : (sges_in_segment * ioc->sge_size)/4;
1001 chain_length = sges_in_segment * ioc->sge_size;
1002 if (chain_offset) {
1003 chain_offset = chain_offset <<
1004 MPI2_SGE_CHAIN_OFFSET_SHIFT;
1005 chain_length += ioc->sge_size;
1006 }
1007 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
1008 chain_length, chain_dma);
1009 sg_local = chain;
1010 if (!chain_offset)
1011 goto fill_in_last_segment;
1012
1013 /* fill in chain segments */
1014 while (sges_in_segment) {
1015 if (sges_in_segment == 1)
1016 ioc->base_add_sg_single(sg_local,
1017 sgl_flags_last_element |
1018 sg_dma_len(sg_scmd),
1019 sg_dma_address(sg_scmd));
1020 else
1021 ioc->base_add_sg_single(sg_local, sgl_flags |
1022 sg_dma_len(sg_scmd),
1023 sg_dma_address(sg_scmd));
1024 sg_scmd = sg_next(sg_scmd);
1025 sg_local += ioc->sge_size;
1026 sges_left--;
1027 sges_in_segment--;
1028 }
1029
1030 chain_dma += ioc->request_sz;
1031 chain += ioc->request_sz;
1032 } while (1);
1033
1034
1035 fill_in_last_segment:
1036
1037 /* fill the last segment */
1038 while (sges_left) {
1039 if (sges_left == 1)
1040 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
1041 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1042 else
1043 ioc->base_add_sg_single(sg_local, sgl_flags |
1044 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1045 sg_scmd = sg_next(sg_scmd);
1046 sg_local += ioc->sge_size;
1047 sges_left--;
1048 }
1049
1050 return 0;
1051}
1052
1053/**
1054 * scsih_change_queue_depth - setting device queue depth
1055 * @sdev: scsi device struct
1056 * @qdepth: requested queue depth
1057 *
1058 * Returns queue depth.
1059 */
1060static int
1061scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1062{
1063 struct Scsi_Host *shost = sdev->host;
1064 int max_depth;
1065 int tag_type;
1066
1067 max_depth = shost->can_queue;
1068 if (!sdev->tagged_supported)
1069 max_depth = 1;
1070 if (qdepth > max_depth)
1071 qdepth = max_depth;
1072 tag_type = (qdepth == 1) ? 0 : MSG_SIMPLE_TAG;
1073 scsi_adjust_queue_depth(sdev, tag_type, qdepth);
1074
1075 if (sdev->inquiry_len > 7)
1076 sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), "
1077 "simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n",
1078 sdev->queue_depth, sdev->tagged_supported, sdev->simple_tags,
1079 sdev->ordered_tags, sdev->scsi_level,
1080 (sdev->inquiry[7] & 2) >> 1);
1081
1082 return sdev->queue_depth;
1083}
1084
1085/**
1086 * scsih_change_queue_depth - changing device queue tag type
1087 * @sdev: scsi device struct
1088 * @tag_type: requested tag type
1089 *
1090 * Returns queue tag type.
1091 */
1092static int
1093scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
1094{
1095 if (sdev->tagged_supported) {
1096 scsi_set_tag_type(sdev, tag_type);
1097 if (tag_type)
1098 scsi_activate_tcq(sdev, sdev->queue_depth);
1099 else
1100 scsi_deactivate_tcq(sdev, sdev->queue_depth);
1101 } else
1102 tag_type = 0;
1103
1104 return tag_type;
1105}
1106
1107/**
1108 * scsih_target_alloc - target add routine
1109 * @starget: scsi target struct
1110 *
1111 * Returns 0 if ok. Any other return is assumed to be an error and
1112 * the device is ignored.
1113 */
1114static int
1115scsih_target_alloc(struct scsi_target *starget)
1116{
1117 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1118 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
1119 struct MPT2SAS_TARGET *sas_target_priv_data;
1120 struct _sas_device *sas_device;
1121 struct _raid_device *raid_device;
1122 unsigned long flags;
1123 struct sas_rphy *rphy;
1124
1125 sas_target_priv_data = kzalloc(sizeof(struct scsi_target), GFP_KERNEL);
1126 if (!sas_target_priv_data)
1127 return -ENOMEM;
1128
1129 starget->hostdata = sas_target_priv_data;
1130 sas_target_priv_data->starget = starget;
1131 sas_target_priv_data->handle = MPT2SAS_INVALID_DEVICE_HANDLE;
1132
1133 /* RAID volumes */
1134 if (starget->channel == RAID_CHANNEL) {
1135 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1136 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1137 starget->channel);
1138 if (raid_device) {
1139 sas_target_priv_data->handle = raid_device->handle;
1140 sas_target_priv_data->sas_address = raid_device->wwid;
1141 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1142 raid_device->starget = starget;
1143 }
1144 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1145 return 0;
1146 }
1147
1148 /* sas/sata devices */
1149 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1150 rphy = dev_to_rphy(starget->dev.parent);
1151 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
1152 rphy->identify.sas_address);
1153
1154 if (sas_device) {
1155 sas_target_priv_data->handle = sas_device->handle;
1156 sas_target_priv_data->sas_address = sas_device->sas_address;
1157 sas_device->starget = starget;
1158 sas_device->id = starget->id;
1159 sas_device->channel = starget->channel;
1160 if (sas_device->hidden_raid_component)
1161 sas_target_priv_data->flags |=
1162 MPT_TARGET_FLAGS_RAID_COMPONENT;
1163 }
1164 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1165
1166 return 0;
1167}
1168
1169/**
1170 * scsih_target_destroy - target destroy routine
1171 * @starget: scsi target struct
1172 *
1173 * Returns nothing.
1174 */
1175static void
1176scsih_target_destroy(struct scsi_target *starget)
1177{
1178 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1179 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
1180 struct MPT2SAS_TARGET *sas_target_priv_data;
1181 struct _sas_device *sas_device;
1182 struct _raid_device *raid_device;
1183 unsigned long flags;
1184 struct sas_rphy *rphy;
1185
1186 sas_target_priv_data = starget->hostdata;
1187 if (!sas_target_priv_data)
1188 return;
1189
1190 if (starget->channel == RAID_CHANNEL) {
1191 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1192 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1193 starget->channel);
1194 if (raid_device) {
1195 raid_device->starget = NULL;
1196 raid_device->sdev = NULL;
1197 }
1198 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1199 goto out;
1200 }
1201
1202 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1203 rphy = dev_to_rphy(starget->dev.parent);
1204 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
1205 rphy->identify.sas_address);
1206 if (sas_device)
1207 sas_device->starget = NULL;
1208
1209 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1210
1211 out:
1212 kfree(sas_target_priv_data);
1213 starget->hostdata = NULL;
1214}
1215
1216/**
1217 * scsih_slave_alloc - device add routine
1218 * @sdev: scsi device struct
1219 *
1220 * Returns 0 if ok. Any other return is assumed to be an error and
1221 * the device is ignored.
1222 */
1223static int
1224scsih_slave_alloc(struct scsi_device *sdev)
1225{
1226 struct Scsi_Host *shost;
1227 struct MPT2SAS_ADAPTER *ioc;
1228 struct MPT2SAS_TARGET *sas_target_priv_data;
1229 struct MPT2SAS_DEVICE *sas_device_priv_data;
1230 struct scsi_target *starget;
1231 struct _raid_device *raid_device;
1232 struct _sas_device *sas_device;
1233 unsigned long flags;
1234
1235 sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
1236 if (!sas_device_priv_data)
1237 return -ENOMEM;
1238
1239 sas_device_priv_data->lun = sdev->lun;
1240 sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
1241
1242 starget = scsi_target(sdev);
1243 sas_target_priv_data = starget->hostdata;
1244 sas_target_priv_data->num_luns++;
1245 sas_device_priv_data->sas_target = sas_target_priv_data;
1246 sdev->hostdata = sas_device_priv_data;
1247 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
1248 sdev->no_uld_attach = 1;
1249
1250 shost = dev_to_shost(&starget->dev);
1251 ioc = shost_priv(shost);
1252 if (starget->channel == RAID_CHANNEL) {
1253 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1254 raid_device = _scsih_raid_device_find_by_id(ioc,
1255 starget->id, starget->channel);
1256 if (raid_device)
1257 raid_device->sdev = sdev; /* raid is single lun */
1258 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1259 } else {
1260 /* set TLR bit for SSP devices */
1261 if (!(ioc->facts.IOCCapabilities &
1262 MPI2_IOCFACTS_CAPABILITY_TLR))
1263 goto out;
1264 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1265 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
1266 sas_device_priv_data->sas_target->sas_address);
1267 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1268 if (sas_device && sas_device->device_info &
1269 MPI2_SAS_DEVICE_INFO_SSP_TARGET)
1270 sas_device_priv_data->flags |= MPT_DEVICE_TLR_ON;
1271 }
1272
1273 out:
1274 return 0;
1275}
1276
1277/**
1278 * scsih_slave_destroy - device destroy routine
1279 * @sdev: scsi device struct
1280 *
1281 * Returns nothing.
1282 */
1283static void
1284scsih_slave_destroy(struct scsi_device *sdev)
1285{
1286 struct MPT2SAS_TARGET *sas_target_priv_data;
1287 struct scsi_target *starget;
1288
1289 if (!sdev->hostdata)
1290 return;
1291
1292 starget = scsi_target(sdev);
1293 sas_target_priv_data = starget->hostdata;
1294 sas_target_priv_data->num_luns--;
1295 kfree(sdev->hostdata);
1296 sdev->hostdata = NULL;
1297}
1298
1299/**
1300 * scsih_display_sata_capabilities - sata capabilities
1301 * @ioc: per adapter object
1302 * @sas_device: the sas_device object
1303 * @sdev: scsi device struct
1304 */
1305static void
1306scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
1307 struct _sas_device *sas_device, struct scsi_device *sdev)
1308{
1309 Mpi2ConfigReply_t mpi_reply;
1310 Mpi2SasDevicePage0_t sas_device_pg0;
1311 u32 ioc_status;
1312 u16 flags;
1313 u32 device_info;
1314
1315 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
1316 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, sas_device->handle))) {
1317 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1318 ioc->name, __FILE__, __LINE__, __func__);
1319 return;
1320 }
1321
1322 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1323 MPI2_IOCSTATUS_MASK;
1324 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1325 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1326 ioc->name, __FILE__, __LINE__, __func__);
1327 return;
1328 }
1329
1330 flags = le16_to_cpu(sas_device_pg0.Flags);
1331 device_info = le16_to_cpu(sas_device_pg0.DeviceInfo);
1332
1333 sdev_printk(KERN_INFO, sdev,
1334 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
1335 "sw_preserve(%s)\n",
1336 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
1337 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
1338 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
1339 "n",
1340 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
1341 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
1342 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
1343}
1344
1345/**
1346 * _scsih_get_volume_capabilities - volume capabilities
1347 * @ioc: per adapter object
1348 * @sas_device: the raid_device object
1349 */
1350static void
1351_scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
1352 struct _raid_device *raid_device)
1353{
1354 Mpi2RaidVolPage0_t *vol_pg0;
1355 Mpi2RaidPhysDiskPage0_t pd_pg0;
1356 Mpi2SasDevicePage0_t sas_device_pg0;
1357 Mpi2ConfigReply_t mpi_reply;
1358 u16 sz;
1359 u8 num_pds;
1360
1361 if ((mpt2sas_config_get_number_pds(ioc, raid_device->handle,
1362 &num_pds)) || !num_pds) {
1363 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1364 ioc->name, __FILE__, __LINE__, __func__);
1365 return;
1366 }
1367
1368 raid_device->num_pds = num_pds;
1369 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
1370 sizeof(Mpi2RaidVol0PhysDisk_t));
1371 vol_pg0 = kzalloc(sz, GFP_KERNEL);
1372 if (!vol_pg0) {
1373 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1374 ioc->name, __FILE__, __LINE__, __func__);
1375 return;
1376 }
1377
1378 if ((mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
1379 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
1380 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1381 ioc->name, __FILE__, __LINE__, __func__);
1382 kfree(vol_pg0);
1383 return;
1384 }
1385
1386 raid_device->volume_type = vol_pg0->VolumeType;
1387
1388 /* figure out what the underlying devices are by
1389 * obtaining the device_info bits for the 1st device
1390 */
1391 if (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
1392 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
1393 vol_pg0->PhysDisk[0].PhysDiskNum))) {
1394 if (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
1395 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
1396 le16_to_cpu(pd_pg0.DevHandle)))) {
1397 raid_device->device_info =
1398 le32_to_cpu(sas_device_pg0.DeviceInfo);
1399 }
1400 }
1401
1402 kfree(vol_pg0);
1403}
1404
/**
 * scsih_slave_configure - device configure routine.
 * @sdev: scsi device struct
 *
 * Chooses the queue depth for the device (RAID volume, SSP, SATA or STP)
 * and prints an identifying banner; for SSP end devices also fetches the
 * SAS transport port mode page.
 *
 * Returns 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT2SAS_DEVICE *sas_device_priv_data;
	struct MPT2SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	unsigned long flags;
	int qdepth;
	u8 ssp_target = 0;
	char *ds = "";
	char *r_level = "";

	qdepth = 1;
	/* NOTE(review): sdev->hostdata is dereferenced without a NULL
	 * check -- presumably slave_alloc always sets it before this
	 * callback runs; confirm against the slave_alloc path.
	 */
	sas_device_priv_data = sdev->hostdata;
	sas_device_priv_data->configured_lun = 1;
	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
	sas_target_priv_data = sas_device_priv_data->sas_target;

	/* raid volume handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {

		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_handle(ioc,
		    sas_target_priv_data->handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		if (!raid_device) {
			printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
			    ioc->name, __FILE__, __LINE__, __func__);
			return 0;
		}

		_scsih_get_volume_capabilities(ioc, raid_device);

		/* RAID Queue Depth Support
		 * IS volume = underlying qdepth of drive type, either
		 * MPT2SAS_SAS_QUEUE_DEPTH or MPT2SAS_SATA_QUEUE_DEPTH
		 * IM/IME/R10 = 128 (MPT2SAS_RAID_QUEUE_DEPTH)
		 */
		if (raid_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
			qdepth = MPT2SAS_SAS_QUEUE_DEPTH;
			ds = "SSP";
		} else {
			qdepth = MPT2SAS_SATA_QUEUE_DEPTH;
			if (raid_device->device_info &
			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
				ds = "SATA";
			else
				ds = "STP";
		}

		/* RAID1/1E/10 (and unknown) override to the RAID queue
		 * depth; RAID0 keeps the per-drive depth chosen above
		 */
		switch (raid_device->volume_type) {
		case MPI2_RAID_VOL_TYPE_RAID0:
			r_level = "RAID0";
			break;
		case MPI2_RAID_VOL_TYPE_RAID1E:
			qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID1E";
			break;
		case MPI2_RAID_VOL_TYPE_RAID1:
			qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID1";
			break;
		case MPI2_RAID_VOL_TYPE_RAID10:
			qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID10";
			break;
		case MPI2_RAID_VOL_TYPE_UNKNOWN:
		default:
			qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
			r_level = "RAIDX";
			break;
		}

		sdev_printk(KERN_INFO, sdev, "%s: "
		    "handle(0x%04x), wwid(0x%016llx), pd_count(%d), type(%s)\n",
		    r_level, raid_device->handle,
		    (unsigned long long)raid_device->wwid,
		    raid_device->num_pds, ds);
		scsih_change_queue_depth(sdev, qdepth);
		return 0;
	}

	/* non-raid handling */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
	   sas_device_priv_data->sas_target->sas_address);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		/* hidden raid component: cache the owning volume's
		 * handle and wwid on the sas_device
		 */
		if (sas_target_priv_data->flags &
		    MPT_TARGET_FLAGS_RAID_COMPONENT) {
			mpt2sas_config_get_volume_handle(ioc,
			    sas_device->handle, &sas_device->volume_handle);
			mpt2sas_config_get_volume_wwid(ioc,
			    sas_device->volume_handle,
			    &sas_device->volume_wwid);
		}
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
			qdepth = MPT2SAS_SAS_QUEUE_DEPTH;
			ssp_target = 1;
			ds = "SSP";
		} else {
			qdepth = MPT2SAS_SATA_QUEUE_DEPTH;
			if (sas_device->device_info &
			    MPI2_SAS_DEVICE_INFO_STP_TARGET)
				ds = "STP";
			else if (sas_device->device_info &
			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
				ds = "SATA";
		}

		sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), "
		    "sas_addr(0x%016llx), device_name(0x%016llx)\n",
		    ds, sas_device->handle,
		    (unsigned long long)sas_device->sas_address,
		    (unsigned long long)sas_device->device_name);
		sdev_printk(KERN_INFO, sdev, "%s: "
		    "enclosure_logical_id(0x%016llx), slot(%d)\n", ds,
		    (unsigned long long) sas_device->enclosure_logical_id,
		    sas_device->slot);

		if (!ssp_target)
			scsih_display_sata_capabilities(ioc, sas_device, sdev);
	}

	scsih_change_queue_depth(sdev, qdepth);

	if (ssp_target)
		sas_read_port_mode_page(sdev);
	return 0;
}
1546
1547/**
1548 * scsih_bios_param - fetch head, sector, cylinder info for a disk
1549 * @sdev: scsi device struct
1550 * @bdev: pointer to block device context
1551 * @capacity: device size (in 512 byte sectors)
1552 * @params: three element array to place output:
1553 * params[0] number of heads (max 255)
1554 * params[1] number of sectors (max 63)
1555 * params[2] number of cylinders
1556 *
1557 * Return nothing.
1558 */
1559static int
1560scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
1561 sector_t capacity, int params[])
1562{
1563 int heads;
1564 int sectors;
1565 sector_t cylinders;
1566 ulong dummy;
1567
1568 heads = 64;
1569 sectors = 32;
1570
1571 dummy = heads * sectors;
1572 cylinders = capacity;
1573 sector_div(cylinders, dummy);
1574
1575 /*
1576 * Handle extended translation size for logical drives
1577 * > 1Gb
1578 */
1579 if ((ulong)capacity >= 0x200000) {
1580 heads = 255;
1581 sectors = 63;
1582 dummy = heads * sectors;
1583 cylinders = capacity;
1584 sector_div(cylinders, dummy);
1585 }
1586
1587 /* return result */
1588 params[0] = heads;
1589 params[1] = sectors;
1590 params[2] = cylinders;
1591
1592 return 0;
1593}
1594
1595/**
1596 * _scsih_response_code - translation of device response code
1597 * @ioc: per adapter object
1598 * @response_code: response code returned by the device
1599 *
1600 * Return nothing.
1601 */
1602static void
1603_scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
1604{
1605 char *desc;
1606
1607 switch (response_code) {
1608 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1609 desc = "task management request completed";
1610 break;
1611 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1612 desc = "invalid frame";
1613 break;
1614 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1615 desc = "task management request not supported";
1616 break;
1617 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1618 desc = "task management request failed";
1619 break;
1620 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1621 desc = "task management request succeeded";
1622 break;
1623 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1624 desc = "invalid lun";
1625 break;
1626 case 0xA:
1627 desc = "overlapped tag attempted";
1628 break;
1629 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1630 desc = "task queued, however not sent to target";
1631 break;
1632 default:
1633 desc = "unknown";
1634 break;
1635 }
1636 printk(MPT2SAS_WARN_FMT "response_code(0x%01x): %s\n",
1637 ioc->name, response_code, desc);
1638}
1639
/**
 * scsih_tm_done - tm completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @VF_ID: virtual function id
 * @reply: reply message frame(lower 32bit addr)
 * Context: none.
 *
 * The callback handler when using scsih_issue_tm.
 *
 * Copies the reply frame (when one is supplied) into ioc->tm_cmds.reply,
 * updates the tm_cmds status bits and wakes the waiter blocked in
 * mpt2sas_scsih_issue_tm().
 *
 * Return nothing.
 */
static void
scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	/* discard replies when no tm is outstanding or for a stale smid */
	if (ioc->tm_cmds.status == MPT2_CMD_NOT_USED)
		return;
	if (ioc->tm_cmds.smid != smid)
		return;
	ioc->tm_cmds.status |= MPT2_CMD_COMPLETE;
	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		/* MsgLength is in units of 32bit words */
		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->tm_cmds.status |= MPT2_CMD_REPLY_VALID;
	}
	ioc->tm_cmds.status &= ~MPT2_CMD_PENDING;
	complete(&ioc->tm_cmds.done);
}
1670
/**
 * mpt2sas_scsih_set_tm_flag - set per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During task management request, we need to freeze the device queue.
 */
void
mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT2SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;

	shost_for_each_device(sdev, ioc->shost) {
		/* after the match is found, keep iterating instead of
		 * breaking out so the shost_for_each_device() iterator can
		 * run to completion (it manages the sdev reference across
		 * iterations)
		 */
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			sas_device_priv_data->sas_target->tm_busy = 1;
			skip = 1;
			ioc->ignore_loginfos = 1;
		}
	}
}
1698
/**
 * mpt2sas_scsih_clear_tm_flag - clear per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During task management request, we need to freeze the device queue.
 * This is the inverse of mpt2sas_scsih_set_tm_flag().
 */
void
mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT2SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;

	shost_for_each_device(sdev, ioc->shost) {
		/* after the match is found, keep iterating instead of
		 * breaking out so the shost_for_each_device() iterator can
		 * run to completion (it manages the sdev reference across
		 * iterations)
		 */
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			sas_device_priv_data->sas_target->tm_busy = 0;
			skip = 1;
			ioc->ignore_loginfos = 0;
		}
	}
}
1726
1727/**
1728 * mpt2sas_scsih_issue_tm - main routine for sending tm requests
1729 * @ioc: per adapter struct
1730 * @device_handle: device handle
1731 * @lun: lun number
1732 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
1733 * @smid_task: smid assigned to the task
1734 * @timeout: timeout in seconds
1735 * Context: The calling function needs to acquire the tm_cmds.mutex
1736 *
1737 * A generic API for sending task management requests to firmware.
1738 *
1739 * The ioc->tm_cmds.status flag should be MPT2_CMD_NOT_USED before calling
1740 * this API.
1741 *
1742 * The callback index is set inside `ioc->tm_cb_idx`.
1743 *
1744 * Return nothing.
1745 */
1746void
1747mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
1748 u8 type, u16 smid_task, ulong timeout)
1749{
1750 Mpi2SCSITaskManagementRequest_t *mpi_request;
1751 Mpi2SCSITaskManagementReply_t *mpi_reply;
1752 u16 smid = 0;
1753 u32 ioc_state;
1754 unsigned long timeleft;
1755 u8 VF_ID = 0;
1756 unsigned long flags;
1757
1758 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1759 if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED ||
1760 ioc->shost_recovery) {
1761 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1762 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
1763 __func__, ioc->name);
1764 return;
1765 }
1766 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1767
1768 ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
1769 if (ioc_state & MPI2_DOORBELL_USED) {
1770 dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "unexpected doorbell "
1771 "active!\n", ioc->name));
1772 goto issue_host_reset;
1773 }
1774
1775 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
1776 mpt2sas_base_fault_info(ioc, ioc_state &
1777 MPI2_DOORBELL_DATA_MASK);
1778 goto issue_host_reset;
1779 }
1780
1781 smid = mpt2sas_base_get_smid(ioc, ioc->tm_cb_idx);
1782 if (!smid) {
1783 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
1784 ioc->name, __func__);
1785 return;
1786 }
1787
1788 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "sending tm: handle(0x%04x),"
1789 " task_type(0x%02x), smid(%d)\n", ioc->name, handle, type, smid));
1790 ioc->tm_cmds.status = MPT2_CMD_PENDING;
1791 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
1792 ioc->tm_cmds.smid = smid;
1793 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
1794 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1795 mpi_request->DevHandle = cpu_to_le16(handle);
1796 mpi_request->TaskType = type;
1797 mpi_request->TaskMID = cpu_to_le16(smid_task);
1798 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
1799 mpt2sas_scsih_set_tm_flag(ioc, handle);
1800 mpt2sas_base_put_smid_hi_priority(ioc, smid, VF_ID);
1801 timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
1802 mpt2sas_scsih_clear_tm_flag(ioc, handle);
1803 if (!(ioc->tm_cmds.status & MPT2_CMD_COMPLETE)) {
1804 printk(MPT2SAS_ERR_FMT "%s: timeout\n",
1805 ioc->name, __func__);
1806 _debug_dump_mf(mpi_request,
1807 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
1808 if (!(ioc->tm_cmds.status & MPT2_CMD_RESET))
1809 goto issue_host_reset;
1810 }
1811
1812 if (ioc->tm_cmds.status & MPT2_CMD_REPLY_VALID) {
1813 mpi_reply = ioc->tm_cmds.reply;
1814 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "complete tm: "
1815 "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
1816 ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
1817 le32_to_cpu(mpi_reply->IOCLogInfo),
1818 le32_to_cpu(mpi_reply->TerminationCount)));
1819 if (ioc->logging_level & MPT_DEBUG_TM)
1820 _scsih_response_code(ioc, mpi_reply->ResponseCode);
1821 }
1822 return;
1823 issue_host_reset:
1824 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER);
1825}
1826
/**
 * scsih_abort - eh threads main abort routine
 * @scmd: pointer to scsi command object
 *
 * Sends MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK for the smid associated
 * with @scmd, then checks the lookup table to see whether the command
 * actually went away.
 *
 * Returns SUCCESS if command aborted else FAILED
 */
static int
scsih_abort(struct scsi_cmnd *scmd)
{
	struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT2SAS_DEVICE *sas_device_priv_data;
	u16 smid;
	u16 handle;
	int r;
	struct scsi_cmnd *scmd_lookup;

	printk(MPT2SAS_INFO_FMT "attempting task abort! scmd(%p)\n",
	    ioc->name, scmd);
	scsi_print_command(scmd);

	/* device already gone: complete the command ourselves */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
		printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n",
		    ioc->name, scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* search for the command */
	smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd);
	if (!smid) {
		/* command is no longer outstanding: nothing to abort */
		scmd->result = DID_RESET << 16;
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components and volumes this is not supported */
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	mutex_lock(&ioc->tm_cmds.mutex);
	handle = sas_device_priv_data->sas_target->handle;
	mpt2sas_scsih_issue_tm(ioc, handle, sas_device_priv_data->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30);

	/* sanity check - see whether command actually completed */
	scmd_lookup = _scsih_scsi_lookup_get(ioc, smid);
	if (scmd_lookup && (scmd_lookup->serial_number == scmd->serial_number))
		r = FAILED;
	else
		r = SUCCESS;
	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
	mutex_unlock(&ioc->tm_cmds.mutex);

 out:
	printk(MPT2SAS_INFO_FMT "task abort: %s scmd(%p)\n",
	    ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	return r;
}
1893
1894
/**
 * scsih_dev_reset - eh threads main device reset routine
 * @scmd: pointer to scsi command object
 *
 * Despite the name, this issues a TARGET reset
 * (MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) against the device's handle;
 * for hidden raid components the owning volume's handle is used instead.
 *
 * Returns SUCCESS if command aborted else FAILED
 */
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
{
	struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT2SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device;
	unsigned long flags;
	u16 handle;
	int r;

	printk(MPT2SAS_INFO_FMT "attempting target reset! scmd(%p)\n",
	    ioc->name, scmd);
	scsi_print_command(scmd);

	/* device already gone: complete the command ourselves */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
		printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n",
		    ioc->name, scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = _scsih_sas_device_find_by_handle(ioc,
		   sas_device_priv_data->sas_target->handle);
		if (sas_device)
			handle = sas_device->volume_handle;
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	mutex_lock(&ioc->tm_cmds.mutex);
	mpt2sas_scsih_issue_tm(ioc, handle, 0,
	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);

	/*
	 * sanity check see whether all commands to this target been
	 * completed
	 */
	if (_scsih_scsi_lookup_find_by_target(ioc, scmd->device->id,
	    scmd->device->channel))
		r = FAILED;
	else
		r = SUCCESS;
	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
	mutex_unlock(&ioc->tm_cmds.mutex);

 out:
	printk(MPT2SAS_INFO_FMT "target reset: %s scmd(%p)\n",
	    ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	return r;
}
1965
1966/**
1967 * scsih_abort - eh threads main host reset routine
1968 * @sdev: scsi device struct
1969 *
1970 * Returns SUCCESS if command aborted else FAILED
1971 */
1972static int
1973scsih_host_reset(struct scsi_cmnd *scmd)
1974{
1975 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1976 int r, retval;
1977
1978 printk(MPT2SAS_INFO_FMT "attempting host reset! scmd(%p)\n",
1979 ioc->name, scmd);
1980 scsi_print_command(scmd);
1981
1982 retval = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
1983 FORCE_BIG_HAMMER);
1984 r = (retval < 0) ? FAILED : SUCCESS;
1985 printk(MPT2SAS_INFO_FMT "host reset: %s scmd(%p)\n",
1986 ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
1987
1988 return r;
1989}
1990
/**
 * _scsih_fw_event_add - insert and queue up fw_event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * This adds the firmware event object into link list, then queues it up to
 * be processed from user context.
 *
 * Return nothing.
 */
static void
_scsih_fw_event_add(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	unsigned long flags;

	/* no event worker thread: silently drop the event */
	if (ioc->firmware_event_thread == NULL)
		return;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_add_tail(&fw_event->list, &ioc->fw_event_list);
	INIT_DELAYED_WORK(&fw_event->work, _firmware_event_work);
	/* delay of 1 jiffy before the work runs */
	queue_delayed_work(ioc->firmware_event_thread, &fw_event->work, 1);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
2016
2017/**
2018 * _scsih_fw_event_free - delete fw_event
2019 * @ioc: per adapter object
2020 * @fw_event: object describing the event
2021 * Context: This function will acquire ioc->fw_event_lock.
2022 *
2023 * This removes firmware event object from link list, frees associated memory.
2024 *
2025 * Return nothing.
2026 */
2027static void
2028_scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
2029 *fw_event)
2030{
2031 unsigned long flags;
2032
2033 spin_lock_irqsave(&ioc->fw_event_lock, flags);
2034 list_del(&fw_event->list);
2035 kfree(fw_event->event_data);
2036 kfree(fw_event);
2037 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
2038}
2039
/**
 * _scsih_fw_event_requeue - requeue an event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * @delay: delay (in jiffies) before the work runs
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * Return nothing.
 */
static void
_scsih_fw_event_requeue(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
    *fw_event, unsigned long delay)
{
	unsigned long flags;
	/* no event worker thread: silently drop the request */
	if (ioc->firmware_event_thread == NULL)
		return;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	queue_delayed_work(ioc->firmware_event_thread, &fw_event->work, delay);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
2060
2061/**
2062 * _scsih_fw_event_off - turn flag off preventing event handling
2063 * @ioc: per adapter object
2064 *
2065 * Used to prevent handling of firmware events during adapter reset
2066 * driver unload.
2067 *
2068 * Return nothing.
2069 */
2070static void
2071_scsih_fw_event_off(struct MPT2SAS_ADAPTER *ioc)
2072{
2073 unsigned long flags;
2074
2075 spin_lock_irqsave(&ioc->fw_event_lock, flags);
2076 ioc->fw_events_off = 1;
2077 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
2078
2079}
2080
2081/**
2082 * _scsih_fw_event_on - turn flag on allowing firmware event handling
2083 * @ioc: per adapter object
2084 *
2085 * Returns nothing.
2086 */
2087static void
2088_scsih_fw_event_on(struct MPT2SAS_ADAPTER *ioc)
2089{
2090 unsigned long flags;
2091
2092 spin_lock_irqsave(&ioc->fw_event_lock, flags);
2093 ioc->fw_events_off = 0;
2094 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
2095}
2096
/**
 * _scsih_ublock_io_device - set the device state to SDEV_RUNNING
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT2SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		/* only touch devices previously marked blocked */
		if (!sas_device_priv_data->block)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
			    MPT2SAS_INFO_FMT "SDEV_RUNNING: "
			    "handle(0x%04x)\n", ioc->name, handle));
			sas_device_priv_data->block = 0;
			scsi_device_set_state(sdev, SDEV_RUNNING);
		}
	}
}
2125
/**
 * _scsih_block_io_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_block_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT2SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		/* skip devices that are already blocked */
		if (sas_device_priv_data->block)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
			    MPT2SAS_INFO_FMT "SDEV_BLOCK: "
			    "handle(0x%04x)\n", ioc->name, handle));
			sas_device_priv_data->block = 1;
			scsi_device_set_state(sdev, SDEV_BLOCK);
		}
	}
}
2154
/**
 * _scsih_block_io_to_children_attached_to_ex
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * attached to this expander. This function called when expander is
 * pulled.  Recurses into any downstream expanders.
 */
static void
_scsih_block_io_to_children_attached_to_ex(struct MPT2SAS_ADAPTER *ioc,
    struct _sas_node *sas_expander)
{
	struct _sas_port *mpt2sas_port;
	struct _sas_device *sas_device;
	struct _sas_node *expander_sibling;
	unsigned long flags;

	if (!sas_expander)
		return;

	/* first pass: block every end device on this expander's ports */
	list_for_each_entry(mpt2sas_port,
	   &sas_expander->sas_port_list, port_list) {
		if (mpt2sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE) {
			spin_lock_irqsave(&ioc->sas_device_lock, flags);
			sas_device =
			    mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
			   mpt2sas_port->remote_identify.sas_address);
			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
			if (!sas_device)
				continue;
			_scsih_block_io_device(ioc, sas_device->handle);
		}
	}

	/* second pass: recurse into downstream expanders */
	list_for_each_entry(mpt2sas_port,
	   &sas_expander->sas_port_list, port_list) {

		/* NOTE(review): device_type is compared against the
		 * MPI2_SAS_DEVICE_INFO_* expander defines here, while the
		 * loop above uses the sas transport enum SAS_END_DEVICE --
		 * confirm the numeric values line up
		 */
		if (mpt2sas_port->remote_identify.device_type ==
		    MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
		    mpt2sas_port->remote_identify.device_type ==
		    MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) {

			spin_lock_irqsave(&ioc->sas_node_lock, flags);
			expander_sibling =
			    mpt2sas_scsih_expander_find_by_sas_address(
			    ioc, mpt2sas_port->remote_identify.sas_address);
			spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
			_scsih_block_io_to_children_attached_to_ex(ioc,
			    expander_sibling);
		}
	}
}
2209
2210/**
2211 * _scsih_block_io_to_children_attached_directly
2212 * @ioc: per adapter object
2213 * @event_data: topology change event data
2214 *
2215 * This routine set sdev state to SDEV_BLOCK for all devices
2216 * direct attached during device pull.
2217 */
2218static void
2219_scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc,
2220 Mpi2EventDataSasTopologyChangeList_t *event_data)
2221{
2222 int i;
2223 u16 handle;
2224 u16 reason_code;
2225 u8 phy_number;
2226
2227 for (i = 0; i < event_data->NumEntries; i++) {
2228 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
2229 if (!handle)
2230 continue;
2231 phy_number = event_data->StartPhyNum + i;
2232 reason_code = event_data->PHY[i].PhyStatus &
2233 MPI2_EVENT_SAS_TOPO_RC_MASK;
2234 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
2235 _scsih_block_io_device(ioc, handle);
2236 }
2237}
2238
/**
 * _scsih_check_topo_delete_events - sanity check on topo events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This routine added to better handle cable breaker.
 *
 * This handles the case where driver receives multiple expander
 * add and delete events in a single shot. When there is a delete event
 * the routine will void any pending add events waiting in the event queue.
 *
 * Return nothing.
 */
static void
_scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
    Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
	u16 expander_handle;
	struct _sas_node *sas_expander;
	unsigned long flags;

	/* a handle below the hba phy count: treat as direct attached */
	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	if (expander_handle < ioc->sas_hba.num_phys) {
		_scsih_block_io_to_children_attached_directly(ioc, event_data);
		return;
	}

	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING
	 || event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) {
		/* expander going away: block everything beneath it */
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc,
		    expander_handle);
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
		_scsih_block_io_to_children_attached_directly(ioc, event_data);

	/* only a fully "not responding" expander voids pending add events */
	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data = fw_event->event_data;
		if (local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
			    expander_handle) {
				dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT
				    "setting ignoring flag\n", ioc->name));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
2302
2303/**
2304 * _scsih_queue_rescan - queue a topology rescan from user context
2305 * @ioc: per adapter object
2306 *
2307 * Return nothing.
2308 */
2309static void
2310_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc)
2311{
2312 struct fw_event_work *fw_event;
2313
2314 if (ioc->wait_for_port_enable_to_complete)
2315 return;
2316 fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
2317 if (!fw_event)
2318 return;
2319 fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET;
2320 fw_event->ioc = ioc;
2321 _scsih_fw_event_add(ioc, fw_event);
2322}
2323
/**
 * _scsih_flush_running_cmds - completing outstanding commands.
 * @ioc: per adapter object
 *
 * The flushing out of all pending scmd commands following host reset,
 * where all IO is dropped to the floor.  Each command is completed back
 * to the midlayer with DID_RESET.
 *
 * Return nothing.
 */
static void
_scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc)
{
	struct scsi_cmnd *scmd;
	u16 smid;
	u16 count = 0;

	/* smids are 1 based */
	for (smid = 1; smid <= ioc->request_depth; smid++) {
		scmd = _scsih_scsi_lookup_getclear(ioc, smid);
		if (!scmd)
			continue;
		count++;
		mpt2sas_base_free_smid(ioc, smid);
		scsi_dma_unmap(scmd);
		scmd->result = DID_RESET << 16;
		scmd->scsi_done(scmd);
	}
	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "completing %d cmds\n",
	    ioc->name, count));
}
2353
/**
 * mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
 * MPT2_IOC_DONE_RESET
 *
 * Return nothing.
 */
void
mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
{
	switch (reset_phase) {
	case MPT2_IOC_PRE_RESET:
		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
		    "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
		/* stop firmware event handling for the duration of reset */
		_scsih_fw_event_off(ioc);
		break;
	case MPT2_IOC_AFTER_RESET:
		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
		    "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
		/* wake up anyone waiting on an outstanding tm request */
		if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
			ioc->tm_cmds.status |= MPT2_CMD_RESET;
			mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
			complete(&ioc->tm_cmds.done);
		}
		_scsih_fw_event_on(ioc);
		/* fail all outstanding IO back to the midlayer */
		_scsih_flush_running_cmds(ioc);
		break;
	case MPT2_IOC_DONE_RESET:
		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
		/* rediscover topology from user context */
		_scsih_queue_rescan(ioc);
		break;
	}
}
2393
2394/**
2395 * scsih_qcmd - main scsi request entry point
2396 * @scmd: pointer to scsi command object
2397 * @done: function pointer to be invoked on completion
2398 *
2399 * The callback index is set inside `ioc->scsi_io_cb_idx`.
2400 *
2401 * Returns 0 on success. If there's a failure, return either:
2402 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
2403 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
2404 */
2405static int
2406scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2407{
2408 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2409 struct MPT2SAS_DEVICE *sas_device_priv_data;
2410 struct MPT2SAS_TARGET *sas_target_priv_data;
2411 Mpi2SCSIIORequest_t *mpi_request;
2412 u32 mpi_control;
2413 u16 smid;
2414 unsigned long flags;
2415
2416 scmd->scsi_done = done;
2417 sas_device_priv_data = scmd->device->hostdata;
2418 if (!sas_device_priv_data) {
2419 scmd->result = DID_NO_CONNECT << 16;
2420 scmd->scsi_done(scmd);
2421 return 0;
2422 }
2423
2424 sas_target_priv_data = sas_device_priv_data->sas_target;
2425 if (!sas_target_priv_data || sas_target_priv_data->handle ==
2426 MPT2SAS_INVALID_DEVICE_HANDLE || sas_target_priv_data->deleted) {
2427 scmd->result = DID_NO_CONNECT << 16;
2428 scmd->scsi_done(scmd);
2429 return 0;
2430 }
2431
2432 /* see if we are busy with task managment stuff */
2433 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
2434 if (sas_target_priv_data->tm_busy ||
2435 ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
2436 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
2437 return SCSI_MLQUEUE_HOST_BUSY;
2438 }
2439 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
2440
2441 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2442 mpi_control = MPI2_SCSIIO_CONTROL_READ;
2443 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
2444 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
2445 else
2446 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
2447
2448 /* set tags */
2449 if (!(sas_device_priv_data->flags & MPT_DEVICE_FLAGS_INIT)) {
2450 if (scmd->device->tagged_supported) {
2451 if (scmd->device->ordered_tags)
2452 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
2453 else
2454 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
2455 } else
2456/* MPI Revision I (UNIT = 0xA) - removed MPI2_SCSIIO_CONTROL_UNTAGGED */
2457/* mpi_control |= MPI2_SCSIIO_CONTROL_UNTAGGED;
2458 */
2459 mpi_control |= (0x500);
2460
2461 } else
2462 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
2463
2464 if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON))
2465 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
2466
2467 smid = mpt2sas_base_get_smid(ioc, ioc->scsi_io_cb_idx);
2468 if (!smid) {
2469 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
2470 ioc->name, __func__);
2471 goto out;
2472 }
2473 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
2474 memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
2475 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2476 if (sas_device_priv_data->sas_target->flags &
2477 MPT_TARGET_FLAGS_RAID_COMPONENT)
2478 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2479 else
2480 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2481 mpi_request->DevHandle =
2482 cpu_to_le16(sas_device_priv_data->sas_target->handle);
2483 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
2484 mpi_request->Control = cpu_to_le32(mpi_control);
2485 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
2486 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
2487 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
2488 mpi_request->SenseBufferLowAddress =
2489 (u32)mpt2sas_base_get_sense_buffer_dma(ioc, smid);
2490 mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
2491 mpi_request->SGLFlags = cpu_to_le16(MPI2_SCSIIO_SGLFLAGS_TYPE_MPI +
2492 MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR);
2493
2494 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
2495 mpi_request->LUN);
2496 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
2497
2498 if (!mpi_request->DataLength) {
2499 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request->SGL);
2500 } else {
2501 if (_scsih_build_scatter_gather(ioc, scmd, smid)) {
2502 mpt2sas_base_free_smid(ioc, smid);
2503 goto out;
2504 }
2505 }
2506
2507 _scsih_scsi_lookup_set(ioc, smid, scmd);
2508 mpt2sas_base_put_smid_scsi_io(ioc, smid, 0,
2509 sas_device_priv_data->sas_target->handle);
2510 return 0;
2511
2512 out:
2513 return SCSI_MLQUEUE_HOST_BUSY;
2514}
2515
2516/**
2517 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
2518 * @sense_buffer: sense data returned by target
2519 * @data: normalized skey/asc/ascq
2520 *
2521 * Return nothing.
2522 */
2523static void
2524_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
2525{
2526 if ((sense_buffer[0] & 0x7F) >= 0x72) {
2527 /* descriptor format */
2528 data->skey = sense_buffer[1] & 0x0F;
2529 data->asc = sense_buffer[2];
2530 data->ascq = sense_buffer[3];
2531 } else {
2532 /* fixed format */
2533 data->skey = sense_buffer[2] & 0x0F;
2534 data->asc = sense_buffer[12];
2535 data->ascq = sense_buffer[13];
2536 }
2537}
2538
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
/**
 * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_reply: reply mf payload returned from firmware
 * @smid: system request message index
 *
 * Prints a human readable summary of a failed SCSI_IO request:
 *
 * scsi_status - SCSI Status code returned from target device
 * scsi_state - state info associated with SCSI_IO determined by ioc
 * ioc_status - ioc supplied status info
 *
 * Return nothing.
 */
static void
_scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
    Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* scratch buffer owned by the ioc, used to build the state string */
	char *desc_scsi_state = ioc->tmp_string;

	/* map ioc_status to a printable description */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* map scsi_status to a printable description */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/* build a space separated list of the scsi_state flag names */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		/* no bits set: the strcat's below cannot be reached, so
		 * pointing at a string literal here is safe */
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	scsi_print_command(scmd);
	printk(MPT2SAS_WARN_FMT "\tdev handle(0x%04x), "
	    "ioc_status(%s)(0x%04x), smid(%d)\n", ioc->name,
	    le16_to_cpu(mpi_reply->DevHandle), desc_ioc_state,
	    ioc_status, smid);
	printk(MPT2SAS_WARN_FMT "\trequest_len(%d), underflow(%d), "
	    "resid(%d)\n", ioc->name, scsi_bufflen(scmd), scmd->underflow,
	    scsi_get_resid(scmd));
	printk(MPT2SAS_WARN_FMT "\ttag(%d), transfer_count(%d), "
	    "sc->result(0x%08x)\n", ioc->name, le16_to_cpu(mpi_reply->TaskTag),
	    le32_to_cpu(mpi_reply->TransferCount), scmd->result);
	printk(MPT2SAS_WARN_FMT "\tscsi_status(%s)(0x%02x), "
	    "scsi_state(%s)(0x%02x)\n", ioc->name, desc_scsi_status,
	    scsi_status, desc_scsi_state, scsi_state);

	/* also decode the sense data if the firmware captured any */
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		printk(MPT2SAS_WARN_FMT "\t[sense_key,asc,ascq]: "
		    "[0x%02x,0x%02x,0x%02x]\n", ioc->name, data.skey,
		    data.asc, data.ascq);
	}

	/* response code lives in the top byte of ResponseInfo */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		_scsih_response_code(ioc, response_bytes[3]);
	}
}
#endif
2697
2698/**
2699 * _scsih_smart_predicted_fault - illuminate Fault LED
2700 * @ioc: per adapter object
2701 * @handle: device handle
2702 *
2703 * Return nothing.
2704 */
2705static void
2706_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2707{
2708 Mpi2SepReply_t mpi_reply;
2709 Mpi2SepRequest_t mpi_request;
2710 struct scsi_target *starget;
2711 struct MPT2SAS_TARGET *sas_target_priv_data;
2712 Mpi2EventNotificationReply_t *event_reply;
2713 Mpi2EventDataSasDeviceStatusChange_t *event_data;
2714 struct _sas_device *sas_device;
2715 ssize_t sz;
2716 unsigned long flags;
2717
2718 /* only handle non-raid devices */
2719 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2720 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
2721 if (!sas_device) {
2722 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2723 return;
2724 }
2725 starget = sas_device->starget;
2726 sas_target_priv_data = starget->hostdata;
2727
2728 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
2729 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) {
2730 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2731 return;
2732 }
2733 starget_printk(KERN_WARNING, starget, "predicted fault\n");
2734 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2735
2736 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) {
2737 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
2738 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
2739 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
2740 mpi_request.SlotStatus =
2741 MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
2742 mpi_request.DevHandle = cpu_to_le16(handle);
2743 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
2744 if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
2745 &mpi_request)) != 0) {
2746 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2747 ioc->name, __FILE__, __LINE__, __func__);
2748 return;
2749 }
2750
2751 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
2752 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
2753 "enclosure_processor: ioc_status (0x%04x), "
2754 "loginfo(0x%08x)\n", ioc->name,
2755 le16_to_cpu(mpi_reply.IOCStatus),
2756 le32_to_cpu(mpi_reply.IOCLogInfo)));
2757 return;
2758 }
2759 }
2760
2761 /* insert into event log */
2762 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
2763 sizeof(Mpi2EventDataSasDeviceStatusChange_t);
2764 event_reply = kzalloc(sz, GFP_KERNEL);
2765 if (!event_reply) {
2766 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2767 ioc->name, __FILE__, __LINE__, __func__);
2768 return;
2769 }
2770
2771 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
2772 event_reply->Event =
2773 cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
2774 event_reply->MsgLength = sz/4;
2775 event_reply->EventDataLength =
2776 cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
2777 event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
2778 event_reply->EventData;
2779 event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
2780 event_data->ASC = 0x5D;
2781 event_data->DevHandle = cpu_to_le16(handle);
2782 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
2783 mpt2sas_ctl_add_to_event_log(ioc, event_reply);
2784 kfree(event_reply);
2785}
2786
2787/**
2788 * scsih_io_done - scsi request callback
2789 * @ioc: per adapter object
2790 * @smid: system request message index
2791 * @VF_ID: virtual function id
2792 * @reply: reply message frame(lower 32bit addr)
2793 *
2794 * Callback handler when using scsih_qcmd.
2795 *
2796 * Return nothing.
2797 */
2798static void
2799scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
2800{
2801 Mpi2SCSIIORequest_t *mpi_request;
2802 Mpi2SCSIIOReply_t *mpi_reply;
2803 struct scsi_cmnd *scmd;
2804 u16 ioc_status;
2805 u32 xfer_cnt;
2806 u8 scsi_state;
2807 u8 scsi_status;
2808 u32 log_info;
2809 struct MPT2SAS_DEVICE *sas_device_priv_data;
2810 u32 response_code;
2811
2812 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
2813 scmd = _scsih_scsi_lookup_getclear(ioc, smid);
2814 if (scmd == NULL)
2815 return;
2816
2817 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
2818
2819 if (mpi_reply == NULL) {
2820 scmd->result = DID_OK << 16;
2821 goto out;
2822 }
2823
2824 sas_device_priv_data = scmd->device->hostdata;
2825 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2826 sas_device_priv_data->sas_target->deleted) {
2827 scmd->result = DID_NO_CONNECT << 16;
2828 goto out;
2829 }
2830
2831 /* turning off TLR */
2832 if (!sas_device_priv_data->tlr_snoop_check) {
2833 sas_device_priv_data->tlr_snoop_check++;
2834 if (sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) {
2835 response_code = (le32_to_cpu(mpi_reply->ResponseInfo)
2836 >> 24);
2837 if (response_code ==
2838 MPI2_SCSITASKMGMT_RSP_INVALID_FRAME)
2839 sas_device_priv_data->flags &=
2840 ~MPT_DEVICE_TLR_ON;
2841 }
2842 }
2843
2844 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
2845 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
2846 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
2847 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
2848 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
2849 else
2850 log_info = 0;
2851 ioc_status &= MPI2_IOCSTATUS_MASK;
2852 scsi_state = mpi_reply->SCSIState;
2853 scsi_status = mpi_reply->SCSIStatus;
2854
2855 if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
2856 (scsi_status == MPI2_SCSI_STATUS_BUSY ||
2857 scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
2858 scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
2859 ioc_status = MPI2_IOCSTATUS_SUCCESS;
2860 }
2861
2862 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2863 struct sense_info data;
2864 const void *sense_data = mpt2sas_base_get_sense_buffer(ioc,
2865 smid);
2866 memcpy(scmd->sense_buffer, sense_data,
2867 le32_to_cpu(mpi_reply->SenseCount));
2868 _scsih_normalize_sense(scmd->sense_buffer, &data);
2869 /* failure prediction threshold exceeded */
2870 if (data.asc == 0x5D)
2871 _scsih_smart_predicted_fault(ioc,
2872 le16_to_cpu(mpi_reply->DevHandle));
2873 }
2874
2875 switch (ioc_status) {
2876 case MPI2_IOCSTATUS_BUSY:
2877 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
2878 scmd->result = SAM_STAT_BUSY;
2879 break;
2880
2881 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2882 scmd->result = DID_NO_CONNECT << 16;
2883 break;
2884
2885 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2886 if (sas_device_priv_data->block) {
2887 scmd->result = (DID_BUS_BUSY << 16);
2888 break;
2889 }
2890
2891 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2892 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2893 scmd->result = DID_RESET << 16;
2894 break;
2895
2896 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2897 if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
2898 scmd->result = DID_SOFT_ERROR << 16;
2899 else
2900 scmd->result = (DID_OK << 16) | scsi_status;
2901 break;
2902
2903 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2904 scmd->result = (DID_OK << 16) | scsi_status;
2905
2906 if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
2907 break;
2908
2909 if (xfer_cnt < scmd->underflow) {
2910 if (scsi_status == SAM_STAT_BUSY)
2911 scmd->result = SAM_STAT_BUSY;
2912 else
2913 scmd->result = DID_SOFT_ERROR << 16;
2914 } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
2915 MPI2_SCSI_STATE_NO_SCSI_STATUS))
2916 scmd->result = DID_SOFT_ERROR << 16;
2917 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2918 scmd->result = DID_RESET << 16;
2919 else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
2920 mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
2921 mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
2922 scmd->result = (DRIVER_SENSE << 24) |
2923 SAM_STAT_CHECK_CONDITION;
2924 scmd->sense_buffer[0] = 0x70;
2925 scmd->sense_buffer[2] = ILLEGAL_REQUEST;
2926 scmd->sense_buffer[12] = 0x20;
2927 scmd->sense_buffer[13] = 0;
2928 }
2929 break;
2930
2931 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2932 scsi_set_resid(scmd, 0);
2933 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2934 case MPI2_IOCSTATUS_SUCCESS:
2935 scmd->result = (DID_OK << 16) | scsi_status;
2936 if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
2937 MPI2_SCSI_STATE_NO_SCSI_STATUS))
2938 scmd->result = DID_SOFT_ERROR << 16;
2939 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2940 scmd->result = DID_RESET << 16;
2941 break;
2942
2943 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2944 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2945 case MPI2_IOCSTATUS_INVALID_SGL:
2946 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2947 case MPI2_IOCSTATUS_INVALID_FIELD:
2948 case MPI2_IOCSTATUS_INVALID_STATE:
2949 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2950 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2951 default:
2952 scmd->result = DID_SOFT_ERROR << 16;
2953 break;
2954
2955 }
2956
2957#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
2958 if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
2959 _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
2960#endif
2961
2962 out:
2963 scsi_dma_unmap(scmd);
2964 scmd->scsi_done(scmd);
2965}
2966
2967/**
2968 * _scsih_link_change - process phy link changes
2969 * @ioc: per adapter object
2970 * @handle: phy handle
2971 * @attached_handle: valid for devices attached to link
2972 * @phy_number: phy number
2973 * @link_rate: new link rate
2974 * Context: user.
2975 *
2976 * Return nothing.
2977 */
2978static void
2979_scsih_link_change(struct MPT2SAS_ADAPTER *ioc, u16 handle, u16 attached_handle,
2980 u8 phy_number, u8 link_rate)
2981{
2982 mpt2sas_transport_update_phy_link_change(ioc, handle, attached_handle,
2983 phy_number, link_rate);
2984}
2985
2986/**
2987 * _scsih_sas_host_refresh - refreshing sas host object contents
2988 * @ioc: per adapter object
2989 * @update: update link information
2990 * Context: user
2991 *
2992 * During port enable, fw will send topology events for every device. Its
2993 * possible that the handles may change from the previous setting, so this
2994 * code keeping handles updating if changed.
2995 *
2996 * Return nothing.
2997 */
2998static void
2999_scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc, u8 update)
3000{
3001 u16 sz;
3002 u16 ioc_status;
3003 int i;
3004 Mpi2ConfigReply_t mpi_reply;
3005 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
3006
3007 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
3008 "updating handles for sas_host(0x%016llx)\n",
3009 ioc->name, (unsigned long long)ioc->sas_hba.sas_address));
3010
3011 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
3012 * sizeof(Mpi2SasIOUnit0PhyData_t));
3013 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
3014 if (!sas_iounit_pg0) {
3015 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3016 ioc->name, __FILE__, __LINE__, __func__);
3017 return;
3018 }
3019 if (!(mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
3020 sas_iounit_pg0, sz))) {
3021 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
3022 MPI2_IOCSTATUS_MASK;
3023 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
3024 goto out;
3025 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
3026 ioc->sas_hba.phy[i].handle =
3027 le16_to_cpu(sas_iounit_pg0->PhyData[i].
3028 ControllerDevHandle);
3029 if (update)
3030 _scsih_link_change(ioc,
3031 ioc->sas_hba.phy[i].handle,
3032 le16_to_cpu(sas_iounit_pg0->PhyData[i].
3033 AttachedDevHandle), i,
3034 sas_iounit_pg0->PhyData[i].
3035 NegotiatedLinkRate >> 4);
3036 }
3037 }
3038
3039 out:
3040 kfree(sas_iounit_pg0);
3041}
3042
3043/**
3044 * _scsih_sas_host_add - create sas host object
3045 * @ioc: per adapter object
3046 *
3047 * Creating host side data object, stored in ioc->sas_hba
3048 *
3049 * Return nothing.
3050 */
3051static void
3052_scsih_sas_host_add(struct MPT2SAS_ADAPTER *ioc)
3053{
3054 int i;
3055 Mpi2ConfigReply_t mpi_reply;
3056 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
3057 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
3058 Mpi2SasPhyPage0_t phy_pg0;
3059 Mpi2SasDevicePage0_t sas_device_pg0;
3060 Mpi2SasEnclosurePage0_t enclosure_pg0;
3061 u16 ioc_status;
3062 u16 sz;
3063 u16 device_missing_delay;
3064
3065 mpt2sas_config_get_number_hba_phys(ioc, &ioc->sas_hba.num_phys);
3066 if (!ioc->sas_hba.num_phys) {
3067 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3068 ioc->name, __FILE__, __LINE__, __func__);
3069 return;
3070 }
3071
3072 /* sas_iounit page 0 */
3073 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
3074 sizeof(Mpi2SasIOUnit0PhyData_t));
3075 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
3076 if (!sas_iounit_pg0) {
3077 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3078 ioc->name, __FILE__, __LINE__, __func__);
3079 return;
3080 }
3081 if ((mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
3082 sas_iounit_pg0, sz))) {
3083 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3084 ioc->name, __FILE__, __LINE__, __func__);
3085 goto out;
3086 }
3087 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
3088 MPI2_IOCSTATUS_MASK;
3089 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
3090 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3091 ioc->name, __FILE__, __LINE__, __func__);
3092 goto out;
3093 }
3094
3095 /* sas_iounit page 1 */
3096 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
3097 sizeof(Mpi2SasIOUnit1PhyData_t));
3098 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
3099 if (!sas_iounit_pg1) {
3100 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3101 ioc->name, __FILE__, __LINE__, __func__);
3102 goto out;
3103 }
3104 if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
3105 sas_iounit_pg1, sz))) {
3106 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3107 ioc->name, __FILE__, __LINE__, __func__);
3108 goto out;
3109 }
3110 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
3111 MPI2_IOCSTATUS_MASK;
3112 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
3113 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3114 ioc->name, __FILE__, __LINE__, __func__);
3115 goto out;
3116 }
3117
3118 ioc->io_missing_delay =
3119 le16_to_cpu(sas_iounit_pg1->IODeviceMissingDelay);
3120 device_missing_delay =
3121 le16_to_cpu(sas_iounit_pg1->ReportDeviceMissingDelay);
3122 if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
3123 ioc->device_missing_delay = (device_missing_delay &
3124 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
3125 else
3126 ioc->device_missing_delay = device_missing_delay &
3127 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
3128
3129 ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
3130 ioc->sas_hba.phy = kcalloc(ioc->sas_hba.num_phys,
3131 sizeof(struct _sas_phy), GFP_KERNEL);
3132 if (!ioc->sas_hba.phy) {
3133 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3134 ioc->name, __FILE__, __LINE__, __func__);
3135 goto out;
3136 }
3137 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
3138 if ((mpt2sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
3139 i))) {
3140 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3141 ioc->name, __FILE__, __LINE__, __func__);
3142 goto out;
3143 }
3144 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
3145 MPI2_IOCSTATUS_MASK;
3146 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
3147 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3148 ioc->name, __FILE__, __LINE__, __func__);
3149 goto out;
3150 }
3151 ioc->sas_hba.phy[i].handle =
3152 le16_to_cpu(sas_iounit_pg0->PhyData[i].ControllerDevHandle);
3153 ioc->sas_hba.phy[i].phy_id = i;
3154 mpt2sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
3155 phy_pg0, ioc->sas_hba.parent_dev);
3156 }
3157 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
3158 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.phy[0].handle))) {
3159 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3160 ioc->name, __FILE__, __LINE__, __func__);
3161 goto out;
3162 }
3163 ioc->sas_hba.handle = le16_to_cpu(sas_device_pg0.DevHandle);
3164 ioc->sas_hba.enclosure_handle =
3165 le16_to_cpu(sas_device_pg0.EnclosureHandle);
3166 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
3167 printk(MPT2SAS_INFO_FMT "host_add: handle(0x%04x), "
3168 "sas_addr(0x%016llx), phys(%d)\n", ioc->name, ioc->sas_hba.handle,
3169 (unsigned long long) ioc->sas_hba.sas_address,
3170 ioc->sas_hba.num_phys) ;
3171
3172 if (ioc->sas_hba.enclosure_handle) {
3173 if (!(mpt2sas_config_get_enclosure_pg0(ioc, &mpi_reply,
3174 &enclosure_pg0,
3175 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
3176 ioc->sas_hba.enclosure_handle))) {
3177 ioc->sas_hba.enclosure_logical_id =
3178 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
3179 }
3180 }
3181
3182 out:
3183 kfree(sas_iounit_pg1);
3184 kfree(sas_iounit_pg0);
3185}
3186
3187/**
3188 * _scsih_expander_add - creating expander object
3189 * @ioc: per adapter object
3190 * @handle: expander handle
3191 *
3192 * Creating expander object, stored in ioc->sas_expander_list.
3193 *
3194 * Return 0 for success, else error.
3195 */
3196static int
3197_scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3198{
3199 struct _sas_node *sas_expander;
3200 Mpi2ConfigReply_t mpi_reply;
3201 Mpi2ExpanderPage0_t expander_pg0;
3202 Mpi2ExpanderPage1_t expander_pg1;
3203 Mpi2SasEnclosurePage0_t enclosure_pg0;
3204 u32 ioc_status;
3205 u16 parent_handle;
3206 __le64 sas_address;
3207 int i;
3208 unsigned long flags;
3209 struct _sas_port *mpt2sas_port;
3210 int rc = 0;
3211
3212 if (!handle)
3213 return -1;
3214
3215 if ((mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
3216 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
3217 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3218 ioc->name, __FILE__, __LINE__, __func__);
3219 return -1;
3220 }
3221
3222 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
3223 MPI2_IOCSTATUS_MASK;
3224 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
3225 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3226 ioc->name, __FILE__, __LINE__, __func__);
3227 return -1;
3228 }
3229
3230 /* handle out of order topology events */
3231 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
3232 if (parent_handle >= ioc->sas_hba.num_phys) {
3233 spin_lock_irqsave(&ioc->sas_node_lock, flags);
3234 sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc,
3235 parent_handle);
3236 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
3237 if (!sas_expander) {
3238 rc = _scsih_expander_add(ioc, parent_handle);
3239 if (rc != 0)
3240 return rc;
3241 }
3242 }
3243
3244 sas_address = le64_to_cpu(expander_pg0.SASAddress);
3245
3246 spin_lock_irqsave(&ioc->sas_node_lock, flags);
3247 sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc,
3248 sas_address);
3249 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
3250
3251 if (sas_expander)
3252 return 0;
3253
3254 sas_expander = kzalloc(sizeof(struct _sas_node),
3255 GFP_KERNEL);
3256 if (!sas_expander) {
3257 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3258 ioc->name, __FILE__, __LINE__, __func__);
3259 return -1;
3260 }
3261
3262 sas_expander->handle = handle;
3263 sas_expander->num_phys = expander_pg0.NumPhys;
3264 sas_expander->parent_handle = parent_handle;
3265 sas_expander->enclosure_handle =
3266 le16_to_cpu(expander_pg0.EnclosureHandle);
3267 sas_expander->sas_address = sas_address;
3268
3269 printk(MPT2SAS_INFO_FMT "expander_add: handle(0x%04x),"
3270 " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
3271 handle, sas_expander->parent_handle, (unsigned long long)
3272 sas_expander->sas_address, sas_expander->num_phys);
3273
3274 if (!sas_expander->num_phys)
3275 goto out_fail;
3276 sas_expander->phy = kcalloc(sas_expander->num_phys,
3277 sizeof(struct _sas_phy), GFP_KERNEL);
3278 if (!sas_expander->phy) {
3279 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3280 ioc->name, __FILE__, __LINE__, __func__);
3281 rc = -1;
3282 goto out_fail;
3283 }
3284
3285 INIT_LIST_HEAD(&sas_expander->sas_port_list);
3286 mpt2sas_port = mpt2sas_transport_port_add(ioc, handle,
3287 sas_expander->parent_handle);
3288 if (!mpt2sas_port) {
3289 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3290 ioc->name, __FILE__, __LINE__, __func__);
3291 rc = -1;
3292 goto out_fail;
3293 }
3294 sas_expander->parent_dev = &mpt2sas_port->rphy->dev;
3295
3296 for (i = 0 ; i < sas_expander->num_phys ; i++) {
3297 if ((mpt2sas_config_get_expander_pg1(ioc, &mpi_reply,
3298 &expander_pg1, i, handle))) {
3299 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3300 ioc->name, __FILE__, __LINE__, __func__);
3301 continue;
3302 }
3303 sas_expander->phy[i].handle = handle;
3304 sas_expander->phy[i].phy_id = i;
3305 mpt2sas_transport_add_expander_phy(ioc, &sas_expander->phy[i],
3306 expander_pg1, sas_expander->parent_dev);
3307 }
3308
3309 if (sas_expander->enclosure_handle) {
3310 if (!(mpt2sas_config_get_enclosure_pg0(ioc, &mpi_reply,
3311 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
3312 sas_expander->enclosure_handle))) {
3313 sas_expander->enclosure_logical_id =
3314 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
3315 }
3316 }
3317
3318 _scsih_expander_node_add(ioc, sas_expander);
3319 return 0;
3320
3321 out_fail:
3322
3323 if (sas_expander)
3324 kfree(sas_expander->phy);
3325 kfree(sas_expander);
3326 return rc;
3327}
3328
3329/**
3330 * _scsih_expander_remove - removing expander object
3331 * @ioc: per adapter object
3332 * @handle: expander handle
3333 *
3334 * Return nothing.
3335 */
3336static void
3337_scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3338{
3339 struct _sas_node *sas_expander;
3340 unsigned long flags;
3341
3342 spin_lock_irqsave(&ioc->sas_node_lock, flags);
3343 sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, handle);
3344 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
3345 _scsih_expander_node_remove(ioc, sas_expander);
3346}
3347
3348/**
3349 * _scsih_add_device - creating sas device object
3350 * @ioc: per adapter object
3351 * @handle: sas device handle
3352 * @phy_num: phy number end device attached to
3353 * @is_pd: is this hidden raid component
3354 *
3355 * Creating end device object, stored in ioc->sas_device_list.
3356 *
3357 * Returns 0 for success, non-zero for failure.
3358 */
3359static int
3360_scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
3361{
3362 Mpi2ConfigReply_t mpi_reply;
3363 Mpi2SasDevicePage0_t sas_device_pg0;
3364 Mpi2SasEnclosurePage0_t enclosure_pg0;
3365 struct _sas_device *sas_device;
3366 u32 ioc_status;
3367 __le64 sas_address;
3368 u32 device_info;
3369 unsigned long flags;
3370
3371 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
3372 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
3373 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3374 ioc->name, __FILE__, __LINE__, __func__);
3375 return -1;
3376 }
3377
3378 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
3379 MPI2_IOCSTATUS_MASK;
3380 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
3381 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3382 ioc->name, __FILE__, __LINE__, __func__);
3383 return -1;
3384 }
3385
3386 /* check if device is present */
3387 if (!(le16_to_cpu(sas_device_pg0.Flags) &
3388 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
3389 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3390 ioc->name, __FILE__, __LINE__, __func__);
3391 printk(MPT2SAS_ERR_FMT "Flags = 0x%04x\n",
3392 ioc->name, le16_to_cpu(sas_device_pg0.Flags));
3393 return -1;
3394 }
3395
3396 /* check if there were any issus with discovery */
3397 if (sas_device_pg0.AccessStatus ==
3398 MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED) {
3399 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3400 ioc->name, __FILE__, __LINE__, __func__);
3401 printk(MPT2SAS_ERR_FMT "AccessStatus = 0x%02x\n",
3402 ioc->name, sas_device_pg0.AccessStatus);
3403 return -1;
3404 }
3405
3406 /* check if this is end device */
3407 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
3408 if (!(_scsih_is_end_device(device_info))) {
3409 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3410 ioc->name, __FILE__, __LINE__, __func__);
3411 return -1;
3412 }
3413
3414 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
3415
3416 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3417 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
3418 sas_address);
3419 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3420
3421 if (sas_device) {
3422 _scsih_ublock_io_device(ioc, handle);
3423 return 0;
3424 }
3425
3426 sas_device = kzalloc(sizeof(struct _sas_device),
3427 GFP_KERNEL);
3428 if (!sas_device) {
3429 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3430 ioc->name, __FILE__, __LINE__, __func__);
3431 return -1;
3432 }
3433
3434 sas_device->handle = handle;
3435 sas_device->parent_handle =
3436 le16_to_cpu(sas_device_pg0.ParentDevHandle);
3437 sas_device->enclosure_handle =
3438 le16_to_cpu(sas_device_pg0.EnclosureHandle);
3439 sas_device->slot =
3440 le16_to_cpu(sas_device_pg0.Slot);
3441 sas_device->device_info = device_info;
3442 sas_device->sas_address = sas_address;
3443 sas_device->hidden_raid_component = is_pd;
3444
3445 /* get enclosure_logical_id */
3446 if (!(mpt2sas_config_get_enclosure_pg0(ioc, &mpi_reply, &enclosure_pg0,
3447 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
3448 sas_device->enclosure_handle))) {
3449 sas_device->enclosure_logical_id =
3450 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
3451 }
3452
3453 /* get device name */
3454 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
3455
3456 if (ioc->wait_for_port_enable_to_complete)
3457 _scsih_sas_device_init_add(ioc, sas_device);
3458 else
3459 _scsih_sas_device_add(ioc, sas_device);
3460
3461 return 0;
3462}
3463
/**
 * _scsih_remove_device - removing sas device object
 * @ioc: per adapter object
 * @handle: sas device handle
 *
 * Marks the target deleted, flushes outstanding I/O with a target
 * reset, asks the firmware to remove the device (SAS IO unit control
 * REMOVE_DEVICE), then removes the transport port and frees the
 * driver's device object.
 *
 * Return nothing.
 */
static void
_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT2SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	unsigned long flags;
	Mpi2SasIoUnitControlReply_t mpi_reply;
	Mpi2SasIoUnitControlRequest_t mpi_request;
	u16 device_handle;

	/* lookup sas_device */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
	if (!sas_device) {
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		return;
	}

	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: handle"
	    "(0x%04x)\n", ioc->name, __func__, handle));

	/* flag the target deleted while still under the lock so the
	 * midlayer stops queueing new I/O to it */
	if (sas_device->starget && sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* host is being unloaded: skip the firmware handshakes below */
	if (ioc->remove_host)
		goto out;

	/* Target Reset to flush out all the outstanding IO */
	/* hidden raid components are reset via their owning volume handle */
	device_handle = (sas_device->hidden_raid_component) ?
	    sas_device->volume_handle : handle;
	if (device_handle) {
		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset: "
		    "handle(0x%04x)\n", ioc->name, device_handle));
		mutex_lock(&ioc->tm_cmds.mutex);
		mpt2sas_scsih_issue_tm(ioc, device_handle, 0,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10);
		ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
		mutex_unlock(&ioc->tm_cmds.mutex);
		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset "
		    "done: handle(0x%04x)\n", ioc->name, device_handle));
	}

	/* SAS_IO_UNIT_CNTR - send REMOVE_DEVICE */
	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sas_iounit: handle"
	    "(0x%04x)\n", ioc->name, handle));
	memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	/* NOTE(review): handle stored without cpu_to_le16 — verify the
	 * DevHandle field's expected byte order on big-endian hosts */
	mpi_request.DevHandle = handle;
	mpi_request.VF_ID = 0;
	if ((mpt2sas_base_sas_iounit_control(ioc, &mpi_reply,
	    &mpi_request)) != 0) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		   ioc->name, __FILE__, __LINE__, __func__);
	}

	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sas_iounit: ioc_status"
	    "(0x%04x), loginfo(0x%08x)\n", ioc->name,
	    le16_to_cpu(mpi_reply.IOCStatus),
	    le32_to_cpu(mpi_reply.IOCLogInfo)));

 out:
	/* always tear down the transport port and free our object */
	mpt2sas_transport_port_remove(ioc, sas_device->sas_address,
	    sas_device->parent_handle);

	printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
	    "(0x%016llx)\n", ioc->name, sas_device->handle,
	    (unsigned long long) sas_device->sas_address);
	_scsih_sas_device_remove(ioc, sas_device);

	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: handle"
	    "(0x%04x)\n", ioc->name, __func__, handle));
}
3547
3548#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
3549/**
3550 * _scsih_sas_topology_change_event_debug - debug for topology event
3551 * @ioc: per adapter object
3552 * @event_data: event data payload
3553 * Context: user.
3554 */
3555static void
3556_scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
3557 Mpi2EventDataSasTopologyChangeList_t *event_data)
3558{
3559 int i;
3560 u16 handle;
3561 u16 reason_code;
3562 u8 phy_number;
3563 char *status_str = NULL;
3564 char link_rate[25];
3565
3566 switch (event_data->ExpStatus) {
3567 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
3568 status_str = "add";
3569 break;
3570 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
3571 status_str = "remove";
3572 break;
3573 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
3574 status_str = "responding";
3575 break;
3576 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
3577 status_str = "remove delay";
3578 break;
3579 default:
3580 status_str = "unknown status";
3581 break;
3582 }
3583 printk(MPT2SAS_DEBUG_FMT "sas topology change: (%s)\n",
3584 ioc->name, status_str);
3585 printk(KERN_DEBUG "\thandle(0x%04x), enclosure_handle(0x%04x) "
3586 "start_phy(%02d), count(%d)\n",
3587 le16_to_cpu(event_data->ExpanderDevHandle),
3588 le16_to_cpu(event_data->EnclosureHandle),
3589 event_data->StartPhyNum, event_data->NumEntries);
3590 for (i = 0; i < event_data->NumEntries; i++) {
3591 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3592 if (!handle)
3593 continue;
3594 phy_number = event_data->StartPhyNum + i;
3595 reason_code = event_data->PHY[i].PhyStatus &
3596 MPI2_EVENT_SAS_TOPO_RC_MASK;
3597 switch (reason_code) {
3598 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
3599 snprintf(link_rate, 25, ": add, link(0x%02x)",
3600 (event_data->PHY[i].LinkRate >> 4));
3601 status_str = link_rate;
3602 break;
3603 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
3604 status_str = ": remove";
3605 break;
3606 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
3607 status_str = ": remove_delay";
3608 break;
3609 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
3610 snprintf(link_rate, 25, ": link(0x%02x)",
3611 (event_data->PHY[i].LinkRate >> 4));
3612 status_str = link_rate;
3613 break;
3614 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
3615 status_str = ": responding";
3616 break;
3617 default:
3618 status_str = ": unknown";
3619 break;
3620 }
3621 printk(KERN_DEBUG "\tphy(%02d), attached_handle(0x%04x)%s\n",
3622 phy_number, handle, status_str);
3623 }
3624}
3625#endif
3626
/**
 * _scsih_sas_topology_change_event - handle topology changes
 * @ioc: per adapter object
 * @VF_ID: virtual function id (unused here)
 * @event_data: event data payload
 * @fw_event: firmware event work object (its ignore flag aborts processing)
 * Context: user.
 *
 * Refreshes/creates the host phys, processes each phy entry of the
 * event (link change, device add, device remove), and finally handles
 * expander add/removal.
 */
static void
_scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
    Mpi2EventDataSasTopologyChangeList_t *event_data,
    struct fw_event_work *fw_event)
{
	int i;
	u16 parent_handle, handle;
	u16 reason_code;
	u8 phy_number;
	struct _sas_node *sas_expander;
	unsigned long flags;
	u8 link_rate_;	/* negotiated link rate, taken from LinkRate bits 7:4 */

#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_topology_change_event_debug(ioc, event_data);
#endif

	/* make sure the host object exists before touching phys */
	if (!ioc->sas_hba.num_phys)
		_scsih_sas_host_add(ioc);
	else
		_scsih_sas_host_refresh(ioc, 0);

	if (fw_event->ignore) {
		dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ignoring expander "
		    "event\n", ioc->name));
		return;
	}

	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);

	/* handle expander add */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
		if (_scsih_expander_add(ioc, parent_handle) != 0)
			return;

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* ignore flag may be set asynchronously mid-loop */
		if (fw_event->ignore) {
			dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ignoring "
			    "expander event\n", ioc->name));
			return;
		}
		if (event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
			continue;
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		phy_number = event_data->StartPhyNum + i;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		link_rate_ = event_data->PHY[i].LinkRate >> 4;
		switch (reason_code) {
		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
			/* parent_handle == 0 means the phy belongs to the
			 * host adapter itself, otherwise to an expander */
			if (!parent_handle) {
				if (phy_number < ioc->sas_hba.num_phys)
					_scsih_link_change(ioc,
					    ioc->sas_hba.phy[phy_number].handle,
					    handle, phy_number, link_rate_);
			} else {
				spin_lock_irqsave(&ioc->sas_node_lock, flags);
				sas_expander =
				    mpt2sas_scsih_expander_find_by_handle(ioc,
					parent_handle);
				spin_unlock_irqrestore(&ioc->sas_node_lock,
				    flags);
				if (sas_expander) {
					if (phy_number < sas_expander->num_phys)
						_scsih_link_change(ioc,
						    sas_expander->
						    phy[phy_number].handle,
						    handle, phy_number,
						    link_rate_);
				}
			}
			/* link came back up: resume blocked I/O */
			if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) {
				if (link_rate_ >= MPI2_SAS_NEG_LINK_RATE_1_5)
					_scsih_ublock_io_device(ioc, handle);
			}
			/* new target: only add if the link negotiated */
			if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED) {
				if (link_rate_ < MPI2_SAS_NEG_LINK_RATE_1_5)
					break;
				_scsih_add_device(ioc, handle, phy_number, 0);
			}
			break;
		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
			_scsih_remove_device(ioc, handle);
			break;
		}
	}

	/* handle expander removal */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		_scsih_expander_remove(ioc, parent_handle);

}
3734
3735#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
3736/**
3737 * _scsih_sas_device_status_change_event_debug - debug for device event
3738 * @event_data: event data payload
3739 * Context: user.
3740 *
3741 * Return nothing.
3742 */
3743static void
3744_scsih_sas_device_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
3745 Mpi2EventDataSasDeviceStatusChange_t *event_data)
3746{
3747 char *reason_str = NULL;
3748
3749 switch (event_data->ReasonCode) {
3750 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
3751 reason_str = "smart data";
3752 break;
3753 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
3754 reason_str = "unsupported device discovered";
3755 break;
3756 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
3757 reason_str = "internal device reset";
3758 break;
3759 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
3760 reason_str = "internal task abort";
3761 break;
3762 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
3763 reason_str = "internal task abort set";
3764 break;
3765 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
3766 reason_str = "internal clear task set";
3767 break;
3768 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
3769 reason_str = "internal query task";
3770 break;
3771 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
3772 reason_str = "sata init failure";
3773 break;
3774 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
3775 reason_str = "internal device reset complete";
3776 break;
3777 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
3778 reason_str = "internal task abort complete";
3779 break;
3780 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
3781 reason_str = "internal async notification";
3782 break;
3783 default:
3784 reason_str = "unknown reason";
3785 break;
3786 }
3787 printk(MPT2SAS_DEBUG_FMT "device status change: (%s)\n"
3788 "\thandle(0x%04x), sas address(0x%016llx)", ioc->name,
3789 reason_str, le16_to_cpu(event_data->DevHandle),
3790 (unsigned long long)le64_to_cpu(event_data->SASAddress));
3791 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
3792 printk(MPT2SAS_DEBUG_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
3793 event_data->ASC, event_data->ASCQ);
3794 printk(KERN_INFO "\n");
3795}
3796#endif
3797
/**
 * _scsih_sas_device_status_change_event - handle device status change
 * @ioc: per adapter object
 * @VF_ID: virtual function id (unused here)
 * @event_data: event data payload
 * Context: user.
 *
 * Debug-only handler: emits logging when event-work-task logging is
 * enabled; no driver state is changed.
 *
 * Return nothing.
 */
static void
_scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
    Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_device_status_change_event_debug(ioc, event_data);
#endif
}
3816
3817#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
3818/**
3819 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure event
3820 * @ioc: per adapter object
3821 * @event_data: event data payload
3822 * Context: user.
3823 *
3824 * Return nothing.
3825 */
3826static void
3827_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
3828 Mpi2EventDataSasEnclDevStatusChange_t *event_data)
3829{
3830 char *reason_str = NULL;
3831
3832 switch (event_data->ReasonCode) {
3833 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
3834 reason_str = "enclosure add";
3835 break;
3836 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
3837 reason_str = "enclosure remove";
3838 break;
3839 default:
3840 reason_str = "unknown reason";
3841 break;
3842 }
3843
3844 printk(MPT2SAS_DEBUG_FMT "enclosure status change: (%s)\n"
3845 "\thandle(0x%04x), enclosure logical id(0x%016llx)"
3846 " number slots(%d)\n", ioc->name, reason_str,
3847 le16_to_cpu(event_data->EnclosureHandle),
3848 (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID),
3849 le16_to_cpu(event_data->StartSlot));
3850}
3851#endif
3852
/**
 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
 * @ioc: per adapter object
 * @VF_ID: virtual function id (unused here)
 * @event_data: event data payload
 * Context: user.
 *
 * Debug-only handler: emits logging when event-work-task logging is
 * enabled; no driver state is changed.
 *
 * Return nothing.
 */
static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT2SAS_ADAPTER *ioc,
    u8 VF_ID, Mpi2EventDataSasEnclDevStatusChange_t *event_data)
{
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
		     event_data);
#endif
}
3872
/**
 * _scsih_sas_broadcast_primative_event - handle broadcast events
 * @ioc: per adapter object
 * @VF_ID: virtual function id (unused here)
 * @event_data: event data payload
 * Context: user.
 *
 * Walks every outstanding SCSI command, issues a QUERY_TASK for each,
 * and aborts the task set for commands the firmware no longer has
 * queued.  (Note: "primative" is a historical misspelling kept so
 * callers elsewhere in the file still match.)
 *
 * Return nothing.
 */
static void
_scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
    Mpi2EventDataSasBroadcastPrimitive_t *event_data)
{
	struct scsi_cmnd *scmd;
	u16 smid, handle;
	u32 lun;
	struct MPT2SAS_DEVICE *sas_device_priv_data;
	u32 termination_count;
	u32 query_count;
	Mpi2SCSITaskManagementReply_t *mpi_reply;

	dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "broadcast primative: "
	    "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum,
	    event_data->PortWidth));

	dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
	    __func__));

	/* serialise against other task-management users; mpi_reply points
	 * at the shared TM reply buffer, refreshed by each issue_tm call */
	mutex_lock(&ioc->tm_cmds.mutex);
	termination_count = 0;
	query_count = 0;
	mpi_reply = ioc->tm_cmds.reply;
	for (smid = 1; smid <= ioc->request_depth; smid++) {
		scmd = _scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		sas_device_priv_data = scmd->device->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		/* skip hidden raid components */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_RAID_COMPONENT)
			continue;
		/* skip volumes */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_VOLUME)
			continue;

		handle = sas_device_priv_data->sas_target->handle;
		lun = sas_device_priv_data->lun;
		query_count++;

		mpt2sas_scsih_issue_tm(ioc, handle, lun,
		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30);
		termination_count += le32_to_cpu(mpi_reply->TerminationCount);

		/* task still known to the firmware: nothing to clean up */
		if ((mpi_reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) &&
		    (mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
		     mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
			continue;

		/* otherwise abort the whole task set for this lun */
		mpt2sas_scsih_issue_tm(ioc, handle, lun,
		    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, smid, 30);
		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
	}
	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
	ioc->broadcast_aen_busy = 0;
	mutex_unlock(&ioc->tm_cmds.mutex);

	dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT
	    "%s - exit, query_count = %d termination_count = %d\n",
	    ioc->name, __func__, query_count, termination_count));
}
3947
3948/**
3949 * _scsih_sas_discovery_event - handle discovery events
3950 * @ioc: per adapter object
3951 * @event_data: event data payload
3952 * Context: user.
3953 *
3954 * Return nothing.
3955 */
3956static void
3957_scsih_sas_discovery_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
3958 Mpi2EventDataSasDiscovery_t *event_data)
3959{
3960#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
3961 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
3962 printk(MPT2SAS_DEBUG_FMT "discovery event: (%s)", ioc->name,
3963 (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
3964 "start" : "stop");
3965 if (event_data->DiscoveryStatus)
3966 printk(MPT2SAS_DEBUG_FMT ", discovery_status(0x%08x)",
3967 ioc->name, le32_to_cpu(event_data->DiscoveryStatus));
3968 printk("\n");
3969 }
3970#endif
3971
3972 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
3973 !ioc->sas_hba.num_phys)
3974 _scsih_sas_host_add(ioc);
3975}
3976
3977/**
3978 * _scsih_reprobe_lun - reprobing lun
3979 * @sdev: scsi device struct
3980 * @no_uld_attach: sdev->no_uld_attach flag setting
3981 *
3982 **/
3983static void
3984_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
3985{
3986 int rc;
3987
3988 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
3989 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
3990 sdev->no_uld_attach ? "hidding" : "exposing");
3991 rc = scsi_device_reprobe(sdev);
3992}
3993
3994/**
3995 * _scsih_reprobe_target - reprobing target
3996 * @starget: scsi target struct
3997 * @no_uld_attach: sdev->no_uld_attach flag setting
3998 *
3999 * Note: no_uld_attach flag determines whether the disk device is attached
4000 * to block layer. A value of `1` means to not attach.
4001 **/
4002static void
4003_scsih_reprobe_target(struct scsi_target *starget, int no_uld_attach)
4004{
4005 struct MPT2SAS_TARGET *sas_target_priv_data = starget->hostdata;
4006
4007 if (no_uld_attach)
4008 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
4009 else
4010 sas_target_priv_data->flags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
4011
4012 starget_for_each_device(starget, no_uld_attach ? (void *)1 : NULL,
4013 _scsih_reprobe_lun);
4014}
4015/**
4016 * _scsih_sas_volume_add - add new volume
4017 * @ioc: per adapter object
4018 * @element: IR config element data
4019 * Context: user.
4020 *
4021 * Return nothing.
4022 */
4023static void
4024_scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc,
4025 Mpi2EventIrConfigElement_t *element)
4026{
4027 struct _raid_device *raid_device;
4028 unsigned long flags;
4029 u64 wwid;
4030 u16 handle = le16_to_cpu(element->VolDevHandle);
4031 int rc;
4032
4033#if 0 /* RAID_HACKS */
4034 if (le32_to_cpu(event_data->Flags) &
4035 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4036 return;
4037#endif
4038
4039 mpt2sas_config_get_volume_wwid(ioc, handle, &wwid);
4040 if (!wwid) {
4041 printk(MPT2SAS_ERR_FMT
4042 "failure at %s:%d/%s()!\n", ioc->name,
4043 __FILE__, __LINE__, __func__);
4044 return;
4045 }
4046
4047 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4048 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
4049 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4050
4051 if (raid_device)
4052 return;
4053
4054 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
4055 if (!raid_device) {
4056 printk(MPT2SAS_ERR_FMT
4057 "failure at %s:%d/%s()!\n", ioc->name,
4058 __FILE__, __LINE__, __func__);
4059 return;
4060 }
4061
4062 raid_device->id = ioc->sas_id++;
4063 raid_device->channel = RAID_CHANNEL;
4064 raid_device->handle = handle;
4065 raid_device->wwid = wwid;
4066 _scsih_raid_device_add(ioc, raid_device);
4067 if (!ioc->wait_for_port_enable_to_complete) {
4068 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
4069 raid_device->id, 0);
4070 if (rc)
4071 _scsih_raid_device_remove(ioc, raid_device);
4072 } else
4073 _scsih_determine_boot_device(ioc, raid_device, 1);
4074}
4075
4076/**
4077 * _scsih_sas_volume_delete - delete volume
4078 * @ioc: per adapter object
4079 * @element: IR config element data
4080 * Context: user.
4081 *
4082 * Return nothing.
4083 */
4084static void
4085_scsih_sas_volume_delete(struct MPT2SAS_ADAPTER *ioc,
4086 Mpi2EventIrConfigElement_t *element)
4087{
4088 struct _raid_device *raid_device;
4089 u16 handle = le16_to_cpu(element->VolDevHandle);
4090 unsigned long flags;
4091 struct MPT2SAS_TARGET *sas_target_priv_data;
4092
4093#if 0 /* RAID_HACKS */
4094 if (le32_to_cpu(event_data->Flags) &
4095 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4096 return;
4097#endif
4098
4099 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4100 raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
4101 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4102 if (!raid_device)
4103 return;
4104 if (raid_device->starget) {
4105 sas_target_priv_data = raid_device->starget->hostdata;
4106 sas_target_priv_data->deleted = 1;
4107 scsi_remove_target(&raid_device->starget->dev);
4108 }
4109 _scsih_raid_device_remove(ioc, raid_device);
4110}
4111
4112/**
4113 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
4114 * @ioc: per adapter object
4115 * @element: IR config element data
4116 * Context: user.
4117 *
4118 * Return nothing.
4119 */
4120static void
4121_scsih_sas_pd_expose(struct MPT2SAS_ADAPTER *ioc,
4122 Mpi2EventIrConfigElement_t *element)
4123{
4124 struct _sas_device *sas_device;
4125 unsigned long flags;
4126 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
4127
4128 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4129 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
4130 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4131 if (!sas_device)
4132 return;
4133
4134 /* exposing raid component */
4135 sas_device->volume_handle = 0;
4136 sas_device->volume_wwid = 0;
4137 sas_device->hidden_raid_component = 0;
4138 _scsih_reprobe_target(sas_device->starget, 0);
4139}
4140
4141/**
4142 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
4143 * @ioc: per adapter object
4144 * @element: IR config element data
4145 * Context: user.
4146 *
4147 * Return nothing.
4148 */
4149static void
4150_scsih_sas_pd_hide(struct MPT2SAS_ADAPTER *ioc,
4151 Mpi2EventIrConfigElement_t *element)
4152{
4153 struct _sas_device *sas_device;
4154 unsigned long flags;
4155 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
4156
4157 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4158 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
4159 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4160 if (!sas_device)
4161 return;
4162
4163 /* hiding raid component */
4164 mpt2sas_config_get_volume_handle(ioc, handle,
4165 &sas_device->volume_handle);
4166 mpt2sas_config_get_volume_wwid(ioc, sas_device->volume_handle,
4167 &sas_device->volume_wwid);
4168 sas_device->hidden_raid_component = 1;
4169 _scsih_reprobe_target(sas_device->starget, 1);
4170}
4171
4172/**
4173 * _scsih_sas_pd_delete - delete pd component
4174 * @ioc: per adapter object
4175 * @element: IR config element data
4176 * Context: user.
4177 *
4178 * Return nothing.
4179 */
4180static void
4181_scsih_sas_pd_delete(struct MPT2SAS_ADAPTER *ioc,
4182 Mpi2EventIrConfigElement_t *element)
4183{
4184 struct _sas_device *sas_device;
4185 unsigned long flags;
4186 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
4187
4188 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4189 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
4190 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4191 if (!sas_device)
4192 return;
4193 _scsih_remove_device(ioc, handle);
4194}
4195
/**
 * _scsih_sas_pd_add - add pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * If the device is already known, just mark it as a hidden raid
 * component; otherwise create it directly as a hidden component.
 * (Header previously said "remove pd component", which was wrong.)
 *
 * Return nothing.
 */
static void
_scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
    Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device)
		sas_device->hidden_raid_component = 1;
	else
		_scsih_add_device(ioc, handle, 0, 1);
}
4220
4221#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
4222/**
4223 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
4224 * @ioc: per adapter object
4225 * @event_data: event data payload
4226 * Context: user.
4227 *
4228 * Return nothing.
4229 */
4230static void
4231_scsih_sas_ir_config_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
4232 Mpi2EventDataIrConfigChangeList_t *event_data)
4233{
4234 Mpi2EventIrConfigElement_t *element;
4235 u8 element_type;
4236 int i;
4237 char *reason_str = NULL, *element_str = NULL;
4238
4239 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4240
4241 printk(MPT2SAS_DEBUG_FMT "raid config change: (%s), elements(%d)\n",
4242 ioc->name, (le32_to_cpu(event_data->Flags) &
4243 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
4244 "foreign" : "native", event_data->NumElements);
4245 for (i = 0; i < event_data->NumElements; i++, element++) {
4246 switch (element->ReasonCode) {
4247 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
4248 reason_str = "add";
4249 break;
4250 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
4251 reason_str = "remove";
4252 break;
4253 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
4254 reason_str = "no change";
4255 break;
4256 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
4257 reason_str = "hide";
4258 break;
4259 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
4260 reason_str = "unhide";
4261 break;
4262 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
4263 reason_str = "volume_created";
4264 break;
4265 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
4266 reason_str = "volume_deleted";
4267 break;
4268 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
4269 reason_str = "pd_created";
4270 break;
4271 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
4272 reason_str = "pd_deleted";
4273 break;
4274 default:
4275 reason_str = "unknown reason";
4276 break;
4277 }
4278 element_type = le16_to_cpu(element->ElementFlags) &
4279 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
4280 switch (element_type) {
4281 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
4282 element_str = "volume";
4283 break;
4284 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
4285 element_str = "phys disk";
4286 break;
4287 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
4288 element_str = "hot spare";
4289 break;
4290 default:
4291 element_str = "unknown element";
4292 break;
4293 }
4294 printk(KERN_DEBUG "\t(%s:%s), vol handle(0x%04x), "
4295 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
4296 reason_str, le16_to_cpu(element->VolDevHandle),
4297 le16_to_cpu(element->PhysDiskDevHandle),
4298 element->PhysDiskNum);
4299 }
4300}
4301#endif
4302
4303/**
4304 * _scsih_sas_ir_config_change_event - handle ir configuration change events
4305 * @ioc: per adapter object
4306 * @VF_ID:
4307 * @event_data: event data payload
4308 * Context: user.
4309 *
4310 * Return nothing.
4311 */
4312static void
4313_scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4314 Mpi2EventDataIrConfigChangeList_t *event_data)
4315{
4316 Mpi2EventIrConfigElement_t *element;
4317 int i;
4318
4319#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
4320 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
4321 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
4322
4323#endif
4324
4325 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4326 for (i = 0; i < event_data->NumElements; i++, element++) {
4327
4328 switch (element->ReasonCode) {
4329 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
4330 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
4331 _scsih_sas_volume_add(ioc, element);
4332 break;
4333 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
4334 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
4335 _scsih_sas_volume_delete(ioc, element);
4336 break;
4337 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
4338 _scsih_sas_pd_hide(ioc, element);
4339 break;
4340 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
4341 _scsih_sas_pd_expose(ioc, element);
4342 break;
4343 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
4344 _scsih_sas_pd_add(ioc, element);
4345 break;
4346 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
4347 _scsih_sas_pd_delete(ioc, element);
4348 break;
4349 }
4350 }
4351}
4352
4353/**
4354 * _scsih_sas_ir_volume_event - IR volume event
4355 * @ioc: per adapter object
4356 * @event_data: event data payload
4357 * Context: user.
4358 *
4359 * Return nothing.
4360 */
4361static void
4362_scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4363 Mpi2EventDataIrVolume_t *event_data)
4364{
4365 u64 wwid;
4366 unsigned long flags;
4367 struct _raid_device *raid_device;
4368 u16 handle;
4369 u32 state;
4370 int rc;
4371 struct MPT2SAS_TARGET *sas_target_priv_data;
4372
4373 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4374 return;
4375
4376 handle = le16_to_cpu(event_data->VolDevHandle);
4377 state = le32_to_cpu(event_data->NewValue);
4378 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: handle(0x%04x), "
4379 "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle,
4380 le32_to_cpu(event_data->PreviousValue), state));
4381
4382 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4383 raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
4384 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4385
4386 switch (state) {
4387 case MPI2_RAID_VOL_STATE_MISSING:
4388 case MPI2_RAID_VOL_STATE_FAILED:
4389 if (!raid_device)
4390 break;
4391 if (raid_device->starget) {
4392 sas_target_priv_data = raid_device->starget->hostdata;
4393 sas_target_priv_data->deleted = 1;
4394 scsi_remove_target(&raid_device->starget->dev);
4395 }
4396 _scsih_raid_device_remove(ioc, raid_device);
4397 break;
4398
4399 case MPI2_RAID_VOL_STATE_ONLINE:
4400 case MPI2_RAID_VOL_STATE_DEGRADED:
4401 case MPI2_RAID_VOL_STATE_OPTIMAL:
4402 if (raid_device)
4403 break;
4404
4405 mpt2sas_config_get_volume_wwid(ioc, handle, &wwid);
4406 if (!wwid) {
4407 printk(MPT2SAS_ERR_FMT
4408 "failure at %s:%d/%s()!\n", ioc->name,
4409 __FILE__, __LINE__, __func__);
4410 break;
4411 }
4412
4413 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
4414 if (!raid_device) {
4415 printk(MPT2SAS_ERR_FMT
4416 "failure at %s:%d/%s()!\n", ioc->name,
4417 __FILE__, __LINE__, __func__);
4418 break;
4419 }
4420
4421 raid_device->id = ioc->sas_id++;
4422 raid_device->channel = RAID_CHANNEL;
4423 raid_device->handle = handle;
4424 raid_device->wwid = wwid;
4425 _scsih_raid_device_add(ioc, raid_device);
4426 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
4427 raid_device->id, 0);
4428 if (rc)
4429 _scsih_raid_device_remove(ioc, raid_device);
4430 break;
4431
4432 case MPI2_RAID_VOL_STATE_INITIALIZING:
4433 default:
4434 break;
4435 }
4436}
4437
4438/**
4439 * _scsih_sas_ir_physical_disk_event - PD event
4440 * @ioc: per adapter object
4441 * @event_data: event data payload
4442 * Context: user.
4443 *
4444 * Return nothing.
4445 */
4446static void
4447_scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4448 Mpi2EventDataIrPhysicalDisk_t *event_data)
4449{
4450 u16 handle;
4451 u32 state;
4452 struct _sas_device *sas_device;
4453 unsigned long flags;
4454
4455 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
4456 return;
4457
4458 handle = le16_to_cpu(event_data->PhysDiskDevHandle);
4459 state = le32_to_cpu(event_data->NewValue);
4460
4461 dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: handle(0x%04x), "
4462 "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle,
4463 le32_to_cpu(event_data->PreviousValue), state));
4464
4465 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4466 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
4467 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4468
4469 switch (state) {
4470#if 0
4471 case MPI2_RAID_PD_STATE_OFFLINE:
4472 if (sas_device)
4473 _scsih_remove_device(ioc, handle);
4474 break;
4475#endif
4476 case MPI2_RAID_PD_STATE_ONLINE:
4477 case MPI2_RAID_PD_STATE_DEGRADED:
4478 case MPI2_RAID_PD_STATE_REBUILDING:
4479 case MPI2_RAID_PD_STATE_OPTIMAL:
4480 if (sas_device)
4481 sas_device->hidden_raid_component = 1;
4482 else
4483 _scsih_add_device(ioc, handle, 0, 1);
4484 break;
4485
4486 case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
4487 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
4488 case MPI2_RAID_PD_STATE_HOT_SPARE:
4489 default:
4490 break;
4491 }
4492}
4493
4494#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
4495/**
4496 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
4497 * @ioc: per adapter object
4498 * @event_data: event data payload
4499 * Context: user.
4500 *
4501 * Return nothing.
4502 */
4503static void
4504_scsih_sas_ir_operation_status_event_debug(struct MPT2SAS_ADAPTER *ioc,
4505 Mpi2EventDataIrOperationStatus_t *event_data)
4506{
4507 char *reason_str = NULL;
4508
4509 switch (event_data->RAIDOperation) {
4510 case MPI2_EVENT_IR_RAIDOP_RESYNC:
4511 reason_str = "resync";
4512 break;
4513 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
4514 reason_str = "online capacity expansion";
4515 break;
4516 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
4517 reason_str = "consistency check";
4518 break;
4519 default:
4520 reason_str = "unknown reason";
4521 break;
4522 }
4523
4524 printk(MPT2SAS_INFO_FMT "raid operational status: (%s)"
4525 "\thandle(0x%04x), percent complete(%d)\n",
4526 ioc->name, reason_str,
4527 le16_to_cpu(event_data->VolDevHandle),
4528 event_data->PercentComplete);
4529}
4530#endif
4531
4532/**
4533 * _scsih_sas_ir_operation_status_event - handle RAID operation events
4534 * @ioc: per adapter object
4535 * @VF_ID:
4536 * @event_data: event data payload
4537 * Context: user.
4538 *
4539 * Return nothing.
4540 */
4541static void
4542_scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4543 Mpi2EventDataIrOperationStatus_t *event_data)
4544{
4545#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
4546 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
4547 _scsih_sas_ir_operation_status_event_debug(ioc, event_data);
4548#endif
4549}
4550
4551/**
4552 * _scsih_task_set_full - handle task set full
4553 * @ioc: per adapter object
4554 * @event_data: event data payload
4555 * Context: user.
4556 *
4557 * Throttle back qdepth.
4558 */
4559static void
4560_scsih_task_set_full(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4561 Mpi2EventDataTaskSetFull_t *event_data)
4562{
4563 unsigned long flags;
4564 struct _sas_device *sas_device;
4565 static struct _raid_device *raid_device;
4566 struct scsi_device *sdev;
4567 int depth;
4568 u16 current_depth;
4569 u16 handle;
4570 int id, channel;
4571 u64 sas_address;
4572
4573 current_depth = le16_to_cpu(event_data->CurrentDepth);
4574 handle = le16_to_cpu(event_data->DevHandle);
4575 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4576 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
4577 if (!sas_device) {
4578 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4579 return;
4580 }
4581 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4582 id = sas_device->id;
4583 channel = sas_device->channel;
4584 sas_address = sas_device->sas_address;
4585
4586 /* if hidden raid component, then change to volume characteristics */
4587 if (sas_device->hidden_raid_component && sas_device->volume_handle) {
4588 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4589 raid_device = _scsih_raid_device_find_by_handle(
4590 ioc, sas_device->volume_handle);
4591 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4592 if (raid_device) {
4593 id = raid_device->id;
4594 channel = raid_device->channel;
4595 handle = raid_device->handle;
4596 sas_address = raid_device->wwid;
4597 }
4598 }
4599
4600 if (ioc->logging_level & MPT_DEBUG_TASK_SET_FULL)
4601 starget_printk(KERN_DEBUG, sas_device->starget, "task set "
4602 "full: handle(0x%04x), sas_addr(0x%016llx), depth(%d)\n",
4603 handle, (unsigned long long)sas_address, current_depth);
4604
4605 shost_for_each_device(sdev, ioc->shost) {
4606 if (sdev->id == id && sdev->channel == channel) {
4607 if (current_depth > sdev->queue_depth) {
4608 if (ioc->logging_level &
4609 MPT_DEBUG_TASK_SET_FULL)
4610 sdev_printk(KERN_INFO, sdev, "strange "
4611 "observation, the queue depth is"
4612 " (%d) meanwhile fw queue depth "
4613 "is (%d)\n", sdev->queue_depth,
4614 current_depth);
4615 continue;
4616 }
4617 depth = scsi_track_queue_full(sdev,
4618 current_depth - 1);
4619 if (depth > 0)
4620 sdev_printk(KERN_INFO, sdev, "Queue depth "
4621 "reduced to (%d)\n", depth);
4622 else if (depth < 0)
4623 sdev_printk(KERN_INFO, sdev, "Tagged Command "
4624 "Queueing is being disabled\n");
4625 else if (depth == 0)
4626 if (ioc->logging_level &
4627 MPT_DEBUG_TASK_SET_FULL)
4628 sdev_printk(KERN_INFO, sdev,
4629 "Queue depth not changed yet\n");
4630 }
4631 }
4632}
4633
4634/**
4635 * _scsih_mark_responding_sas_device - mark a sas_devices as responding
4636 * @ioc: per adapter object
4637 * @sas_address: sas address
4638 * @slot: enclosure slot id
4639 * @handle: device handle
4640 *
4641 * After host reset, find out whether devices are still responding.
4642 * Used in _scsi_remove_unresponsive_sas_devices.
4643 *
4644 * Return nothing.
4645 */
4646static void
4647_scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
4648 u16 slot, u16 handle)
4649{
4650 struct MPT2SAS_TARGET *sas_target_priv_data;
4651 struct scsi_target *starget;
4652 struct _sas_device *sas_device;
4653 unsigned long flags;
4654
4655 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4656 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
4657 if (sas_device->sas_address == sas_address &&
4658 sas_device->slot == slot && sas_device->starget) {
4659 sas_device->responding = 1;
4660 starget_printk(KERN_INFO, sas_device->starget,
4661 "handle(0x%04x), sas_addr(0x%016llx), enclosure "
4662 "logical id(0x%016llx), slot(%d)\n", handle,
4663 (unsigned long long)sas_device->sas_address,
4664 (unsigned long long)
4665 sas_device->enclosure_logical_id,
4666 sas_device->slot);
4667 if (sas_device->handle == handle)
4668 goto out;
4669 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
4670 sas_device->handle);
4671 sas_device->handle = handle;
4672 starget = sas_device->starget;
4673 sas_target_priv_data = starget->hostdata;
4674 sas_target_priv_data->handle = handle;
4675 goto out;
4676 }
4677 }
4678 out:
4679 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4680}
4681
4682/**
4683 * _scsih_search_responding_sas_devices -
4684 * @ioc: per adapter object
4685 *
4686 * After host reset, find out whether devices are still responding.
4687 * If not remove.
4688 *
4689 * Return nothing.
4690 */
4691static void
4692_scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
4693{
4694 Mpi2SasDevicePage0_t sas_device_pg0;
4695 Mpi2ConfigReply_t mpi_reply;
4696 u16 ioc_status;
4697 __le64 sas_address;
4698 u16 handle;
4699 u32 device_info;
4700 u16 slot;
4701
4702 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__);
4703
4704 if (list_empty(&ioc->sas_device_list))
4705 return;
4706
4707 handle = 0xFFFF;
4708 while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
4709 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
4710 handle))) {
4711 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4712 MPI2_IOCSTATUS_MASK;
4713 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
4714 break;
4715 handle = le16_to_cpu(sas_device_pg0.DevHandle);
4716 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
4717 if (!(_scsih_is_end_device(device_info)))
4718 continue;
4719 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
4720 slot = le16_to_cpu(sas_device_pg0.Slot);
4721 _scsih_mark_responding_sas_device(ioc, sas_address, slot,
4722 handle);
4723 }
4724}
4725
4726/**
4727 * _scsih_mark_responding_raid_device - mark a raid_device as responding
4728 * @ioc: per adapter object
4729 * @wwid: world wide identifier for raid volume
4730 * @handle: device handle
4731 *
4732 * After host reset, find out whether devices are still responding.
4733 * Used in _scsi_remove_unresponsive_raid_devices.
4734 *
4735 * Return nothing.
4736 */
4737static void
4738_scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
4739 u16 handle)
4740{
4741 struct MPT2SAS_TARGET *sas_target_priv_data;
4742 struct scsi_target *starget;
4743 struct _raid_device *raid_device;
4744 unsigned long flags;
4745
4746 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4747 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
4748 if (raid_device->wwid == wwid && raid_device->starget) {
4749 raid_device->responding = 1;
4750 starget_printk(KERN_INFO, raid_device->starget,
4751 "handle(0x%04x), wwid(0x%016llx)\n", handle,
4752 (unsigned long long)raid_device->wwid);
4753 if (raid_device->handle == handle)
4754 goto out;
4755 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
4756 raid_device->handle);
4757 raid_device->handle = handle;
4758 starget = raid_device->starget;
4759 sas_target_priv_data = starget->hostdata;
4760 sas_target_priv_data->handle = handle;
4761 goto out;
4762 }
4763 }
4764 out:
4765 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4766}
4767
4768/**
4769 * _scsih_search_responding_raid_devices -
4770 * @ioc: per adapter object
4771 *
4772 * After host reset, find out whether devices are still responding.
4773 * If not remove.
4774 *
4775 * Return nothing.
4776 */
4777static void
4778_scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
4779{
4780 Mpi2RaidVolPage1_t volume_pg1;
4781 Mpi2ConfigReply_t mpi_reply;
4782 u16 ioc_status;
4783 u16 handle;
4784
4785 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__);
4786
4787 if (list_empty(&ioc->raid_device_list))
4788 return;
4789
4790 handle = 0xFFFF;
4791 while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
4792 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
4793 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4794 MPI2_IOCSTATUS_MASK;
4795 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
4796 break;
4797 handle = le16_to_cpu(volume_pg1.DevHandle);
4798 _scsih_mark_responding_raid_device(ioc,
4799 le64_to_cpu(volume_pg1.WWID), handle);
4800 }
4801}
4802
4803/**
4804 * _scsih_mark_responding_expander - mark a expander as responding
4805 * @ioc: per adapter object
4806 * @sas_address: sas address
4807 * @handle:
4808 *
4809 * After host reset, find out whether devices are still responding.
4810 * Used in _scsi_remove_unresponsive_expanders.
4811 *
4812 * Return nothing.
4813 */
4814static void
4815_scsih_mark_responding_expander(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
4816 u16 handle)
4817{
4818 struct _sas_node *sas_expander;
4819 unsigned long flags;
4820
4821 spin_lock_irqsave(&ioc->sas_node_lock, flags);
4822 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
4823 if (sas_expander->sas_address == sas_address) {
4824 sas_expander->responding = 1;
4825 if (sas_expander->handle != handle) {
4826 printk(KERN_INFO "old handle(0x%04x)\n",
4827 sas_expander->handle);
4828 sas_expander->handle = handle;
4829 }
4830 goto out;
4831 }
4832 }
4833 out:
4834 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4835}
4836
4837/**
4838 * _scsih_search_responding_expanders -
4839 * @ioc: per adapter object
4840 *
4841 * After host reset, find out whether devices are still responding.
4842 * If not remove.
4843 *
4844 * Return nothing.
4845 */
4846static void
4847_scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
4848{
4849 Mpi2ExpanderPage0_t expander_pg0;
4850 Mpi2ConfigReply_t mpi_reply;
4851 u16 ioc_status;
4852 __le64 sas_address;
4853 u16 handle;
4854
4855 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__);
4856
4857 if (list_empty(&ioc->sas_expander_list))
4858 return;
4859
4860 handle = 0xFFFF;
4861 while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
4862 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
4863
4864 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4865 MPI2_IOCSTATUS_MASK;
4866 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
4867 break;
4868
4869 handle = le16_to_cpu(expander_pg0.DevHandle);
4870 sas_address = le64_to_cpu(expander_pg0.SASAddress);
4871 printk(KERN_INFO "\texpander present: handle(0x%04x), "
4872 "sas_addr(0x%016llx)\n", handle,
4873 (unsigned long long)sas_address);
4874 _scsih_mark_responding_expander(ioc, sas_address, handle);
4875 }
4876
4877}
4878
4879/**
4880 * _scsih_remove_unresponding_devices - removing unresponding devices
4881 * @ioc: per adapter object
4882 *
4883 * Return nothing.
4884 */
4885static void
4886_scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
4887{
4888 struct _sas_device *sas_device, *sas_device_next;
4889 struct _sas_node *sas_expander, *sas_expander_next;
4890 struct _raid_device *raid_device, *raid_device_next;
4891 unsigned long flags;
4892
4893 _scsih_search_responding_sas_devices(ioc);
4894 _scsih_search_responding_raid_devices(ioc);
4895 _scsih_search_responding_expanders(ioc);
4896
4897 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4898 ioc->shost_recovery = 0;
4899 if (ioc->shost->shost_state == SHOST_RECOVERY) {
4900 printk(MPT2SAS_INFO_FMT "putting controller into "
4901 "SHOST_RUNNING\n", ioc->name);
4902 scsi_host_set_state(ioc->shost, SHOST_RUNNING);
4903 }
4904 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4905
4906 list_for_each_entry_safe(sas_device, sas_device_next,
4907 &ioc->sas_device_list, list) {
4908 if (sas_device->responding) {
4909 sas_device->responding = 0;
4910 continue;
4911 }
4912 if (sas_device->starget)
4913 starget_printk(KERN_INFO, sas_device->starget,
4914 "removing: handle(0x%04x), sas_addr(0x%016llx), "
4915 "enclosure logical id(0x%016llx), slot(%d)\n",
4916 sas_device->handle,
4917 (unsigned long long)sas_device->sas_address,
4918 (unsigned long long)
4919 sas_device->enclosure_logical_id,
4920 sas_device->slot);
4921 _scsih_remove_device(ioc, sas_device->handle);
4922 }
4923
4924 list_for_each_entry_safe(raid_device, raid_device_next,
4925 &ioc->raid_device_list, list) {
4926 if (raid_device->responding) {
4927 raid_device->responding = 0;
4928 continue;
4929 }
4930 if (raid_device->starget) {
4931 starget_printk(KERN_INFO, raid_device->starget,
4932 "removing: handle(0x%04x), wwid(0x%016llx)\n",
4933 raid_device->handle,
4934 (unsigned long long)raid_device->wwid);
4935 scsi_remove_target(&raid_device->starget->dev);
4936 }
4937 _scsih_raid_device_remove(ioc, raid_device);
4938 }
4939
4940 list_for_each_entry_safe(sas_expander, sas_expander_next,
4941 &ioc->sas_expander_list, list) {
4942 if (sas_expander->responding) {
4943 sas_expander->responding = 0;
4944 continue;
4945 }
4946 printk("\tremoving expander: handle(0x%04x), "
4947 " sas_addr(0x%016llx)\n", sas_expander->handle,
4948 (unsigned long long)sas_expander->sas_address);
4949 _scsih_expander_remove(ioc, sas_expander->handle);
4950 }
4951}
4952
4953/**
4954 * _firmware_event_work - delayed task for processing firmware events
4955 * @ioc: per adapter object
4956 * @work: equal to the fw_event_work object
4957 * Context: user.
4958 *
4959 * Return nothing.
4960 */
4961static void
4962_firmware_event_work(struct work_struct *work)
4963{
4964 struct fw_event_work *fw_event = container_of(work,
4965 struct fw_event_work, work.work);
4966 unsigned long flags;
4967 struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
4968
4969 /* This is invoked by calling _scsih_queue_rescan(). */
4970 if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) {
4971 _scsih_fw_event_free(ioc, fw_event);
4972 _scsih_sas_host_refresh(ioc, 1);
4973 _scsih_remove_unresponding_devices(ioc);
4974 return;
4975 }
4976
4977 /* the queue is being flushed so ignore this event */
4978 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4979 if (ioc->fw_events_off || ioc->remove_host) {
4980 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4981 _scsih_fw_event_free(ioc, fw_event);
4982 return;
4983 }
4984 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4985
4986 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4987 if (ioc->shost_recovery) {
4988 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4989 _scsih_fw_event_requeue(ioc, fw_event, 1000);
4990 return;
4991 }
4992 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4993
4994 switch (fw_event->event) {
4995 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
4996 _scsih_sas_topology_change_event(ioc, fw_event->VF_ID,
4997 fw_event->event_data, fw_event);
4998 break;
4999 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
5000 _scsih_sas_device_status_change_event(ioc, fw_event->VF_ID,
5001 fw_event->event_data);
5002 break;
5003 case MPI2_EVENT_SAS_DISCOVERY:
5004 _scsih_sas_discovery_event(ioc, fw_event->VF_ID,
5005 fw_event->event_data);
5006 break;
5007 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
5008 _scsih_sas_broadcast_primative_event(ioc, fw_event->VF_ID,
5009 fw_event->event_data);
5010 break;
5011 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
5012 _scsih_sas_enclosure_dev_status_change_event(ioc,
5013 fw_event->VF_ID, fw_event->event_data);
5014 break;
5015 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
5016 _scsih_sas_ir_config_change_event(ioc, fw_event->VF_ID,
5017 fw_event->event_data);
5018 break;
5019 case MPI2_EVENT_IR_VOLUME:
5020 _scsih_sas_ir_volume_event(ioc, fw_event->VF_ID,
5021 fw_event->event_data);
5022 break;
5023 case MPI2_EVENT_IR_PHYSICAL_DISK:
5024 _scsih_sas_ir_physical_disk_event(ioc, fw_event->VF_ID,
5025 fw_event->event_data);
5026 break;
5027 case MPI2_EVENT_IR_OPERATION_STATUS:
5028 _scsih_sas_ir_operation_status_event(ioc, fw_event->VF_ID,
5029 fw_event->event_data);
5030 break;
5031 case MPI2_EVENT_TASK_SET_FULL:
5032 _scsih_task_set_full(ioc, fw_event->VF_ID,
5033 fw_event->event_data);
5034 break;
5035 }
5036 _scsih_fw_event_free(ioc, fw_event);
5037}
5038
5039/**
5040 * mpt2sas_scsih_event_callback - firmware event handler (called at ISR time)
5041 * @ioc: per adapter object
5042 * @VF_ID: virtual function id
5043 * @reply: reply message frame(lower 32bit addr)
5044 * Context: interrupt.
5045 *
5046 * This function merely adds a new work task into ioc->firmware_event_thread.
5047 * The tasks are worked from _firmware_event_work in user context.
5048 *
5049 * Return nothing.
5050 */
5051void
5052mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, u32 reply)
5053{
5054 struct fw_event_work *fw_event;
5055 Mpi2EventNotificationReply_t *mpi_reply;
5056 unsigned long flags;
5057 u16 event;
5058
5059 /* events turned off due to host reset or driver unloading */
5060 spin_lock_irqsave(&ioc->fw_event_lock, flags);
5061 if (ioc->fw_events_off || ioc->remove_host) {
5062 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
5063 return;
5064 }
5065 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
5066
5067 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
5068 event = le16_to_cpu(mpi_reply->Event);
5069
5070 switch (event) {
5071 /* handle these */
5072 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
5073 {
5074 Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
5075 (Mpi2EventDataSasBroadcastPrimitive_t *)
5076 mpi_reply->EventData;
5077
5078 if (baen_data->Primitive !=
5079 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT ||
5080 ioc->broadcast_aen_busy)
5081 return;
5082 ioc->broadcast_aen_busy = 1;
5083 break;
5084 }
5085
5086 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
5087 _scsih_check_topo_delete_events(ioc,
5088 (Mpi2EventDataSasTopologyChangeList_t *)
5089 mpi_reply->EventData);
5090 break;
5091
5092 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
5093 case MPI2_EVENT_IR_OPERATION_STATUS:
5094 case MPI2_EVENT_SAS_DISCOVERY:
5095 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
5096 case MPI2_EVENT_IR_VOLUME:
5097 case MPI2_EVENT_IR_PHYSICAL_DISK:
5098 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
5099 case MPI2_EVENT_TASK_SET_FULL:
5100 break;
5101
5102 default: /* ignore the rest */
5103 return;
5104 }
5105
5106 fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
5107 if (!fw_event) {
5108 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
5109 ioc->name, __FILE__, __LINE__, __func__);
5110 return;
5111 }
5112 fw_event->event_data =
5113 kzalloc(mpi_reply->EventDataLength*4, GFP_ATOMIC);
5114 if (!fw_event->event_data) {
5115 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
5116 ioc->name, __FILE__, __LINE__, __func__);
5117 kfree(fw_event);
5118 return;
5119 }
5120
5121 memcpy(fw_event->event_data, mpi_reply->EventData,
5122 mpi_reply->EventDataLength*4);
5123 fw_event->ioc = ioc;
5124 fw_event->VF_ID = VF_ID;
5125 fw_event->event = event;
5126 _scsih_fw_event_add(ioc, fw_event);
5127}
5128
/* shost template: entry points the scsi midlayer calls into this driver */
static struct scsi_host_template scsih_driver_template = {
	.module = THIS_MODULE,
	.name = "Fusion MPT SAS Host",
	.proc_name = MPT2SAS_DRIVER_NAME,
	/* command submission and device life-cycle hooks */
	.queuecommand = scsih_qcmd,
	.target_alloc = scsih_target_alloc,
	.slave_alloc = scsih_slave_alloc,
	.slave_configure = scsih_slave_configure,
	.target_destroy = scsih_target_destroy,
	.slave_destroy = scsih_slave_destroy,
	.change_queue_depth = scsih_change_queue_depth,
	.change_queue_type = scsih_change_queue_type,
	/* error recovery escalation: abort -> device reset -> host reset */
	.eh_abort_handler = scsih_abort,
	.eh_device_reset_handler = scsih_dev_reset,
	.eh_host_reset_handler = scsih_host_reset,
	.bios_param = scsih_bios_param,
	/* NOTE(review): can_queue of 1 is presumably raised at attach time
	 * based on IOC resources - confirm against the probe path */
	.can_queue = 1,
	.this_id = -1,
	.sg_tablesize = MPT2SAS_SG_DEPTH,
	.max_sectors = 8192,
	.cmd_per_lun = 7,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = mpt2sas_host_attrs,
	.sdev_attrs = mpt2sas_dev_attrs,
};
5155
5156/**
5157 * _scsih_expander_node_remove - removing expander device from list.
5158 * @ioc: per adapter object
5159 * @sas_expander: the sas_device object
5160 * Context: Calling function should acquire ioc->sas_node_lock.
5161 *
5162 * Removing object and freeing associated memory from the
5163 * ioc->sas_expander_list.
5164 *
5165 * Return nothing.
5166 */
5167static void
5168_scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
5169 struct _sas_node *sas_expander)
5170{
5171 struct _sas_port *mpt2sas_port;
5172 struct _sas_device *sas_device;
5173 struct _sas_node *expander_sibling;
5174 unsigned long flags;
5175
5176 if (!sas_expander)
5177 return;
5178
5179 /* remove sibling ports attached to this expander */
5180 retry_device_search:
5181 list_for_each_entry(mpt2sas_port,
5182 &sas_expander->sas_port_list, port_list) {
5183 if (mpt2sas_port->remote_identify.device_type ==
5184 SAS_END_DEVICE) {
5185 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5186 sas_device =
5187 mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
5188 mpt2sas_port->remote_identify.sas_address);
5189 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5190 if (!sas_device)
5191 continue;
5192 _scsih_remove_device(ioc, sas_device->handle);
5193 goto retry_device_search;
5194 }
5195 }
5196
5197 retry_expander_search:
5198 list_for_each_entry(mpt2sas_port,
5199 &sas_expander->sas_port_list, port_list) {
5200
5201 if (mpt2sas_port->remote_identify.device_type ==
5202 MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
5203 mpt2sas_port->remote_identify.device_type ==
5204 MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) {
5205
5206 spin_lock_irqsave(&ioc->sas_node_lock, flags);
5207 expander_sibling =
5208 mpt2sas_scsih_expander_find_by_sas_address(
5209 ioc, mpt2sas_port->remote_identify.sas_address);
5210 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5211 if (!expander_sibling)
5212 continue;
5213 _scsih_expander_remove(ioc, expander_sibling->handle);
5214 goto retry_expander_search;
5215 }
5216 }
5217
5218 mpt2sas_transport_port_remove(ioc, sas_expander->sas_address,
5219 sas_expander->parent_handle);
5220
5221 printk(MPT2SAS_INFO_FMT "expander_remove: handle"
5222 "(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
5223 sas_expander->handle, (unsigned long long)
5224 sas_expander->sas_address);
5225
5226 list_del(&sas_expander->list);
5227 kfree(sas_expander->phy);
5228 kfree(sas_expander);
5229}
5230
5231/**
5232 * scsih_remove - detach and remove add host
5233 * @pdev: PCI device struct
5234 *
5235 * Return nothing.
5236 */
5237static void __devexit
5238scsih_remove(struct pci_dev *pdev)
5239{
5240 struct Scsi_Host *shost = pci_get_drvdata(pdev);
5241 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
5242 struct _sas_port *mpt2sas_port;
5243 struct _sas_device *sas_device;
5244 struct _sas_node *expander_sibling;
5245 struct workqueue_struct *wq;
5246 unsigned long flags;
5247
5248 ioc->remove_host = 1;
5249 _scsih_fw_event_off(ioc);
5250
5251 spin_lock_irqsave(&ioc->fw_event_lock, flags);
5252 wq = ioc->firmware_event_thread;
5253 ioc->firmware_event_thread = NULL;
5254 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
5255 if (wq)
5256 destroy_workqueue(wq);
5257
5258 /* free ports attached to the sas_host */
5259 retry_again:
5260 list_for_each_entry(mpt2sas_port,
5261 &ioc->sas_hba.sas_port_list, port_list) {
5262 if (mpt2sas_port->remote_identify.device_type ==
5263 SAS_END_DEVICE) {
5264 sas_device =
5265 mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
5266 mpt2sas_port->remote_identify.sas_address);
5267 if (sas_device) {
5268 _scsih_remove_device(ioc, sas_device->handle);
5269 goto retry_again;
5270 }
5271 } else {
5272 expander_sibling =
5273 mpt2sas_scsih_expander_find_by_sas_address(ioc,
5274 mpt2sas_port->remote_identify.sas_address);
5275 if (expander_sibling) {
5276 _scsih_expander_remove(ioc,
5277 expander_sibling->handle);
5278 goto retry_again;
5279 }
5280 }
5281 }
5282
5283 /* free phys attached to the sas_host */
5284 if (ioc->sas_hba.num_phys) {
5285 kfree(ioc->sas_hba.phy);
5286 ioc->sas_hba.phy = NULL;
5287 ioc->sas_hba.num_phys = 0;
5288 }
5289
5290 sas_remove_host(shost);
5291 mpt2sas_base_detach(ioc);
5292 list_del(&ioc->list);
5293 scsi_remove_host(shost);
5294 scsi_host_put(shost);
5295}
5296
5297/**
5298 * _scsih_probe_boot_devices - reports 1st device
5299 * @ioc: per adapter object
5300 *
5301 * If specified in bios page 2, this routine reports the 1st
5302 * device scsi-ml or sas transport for persistent boot device
5303 * purposes. Please refer to function _scsih_determine_boot_device()
5304 */
5305static void
5306_scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
5307{
5308 u8 is_raid;
5309 void *device;
5310 struct _sas_device *sas_device;
5311 struct _raid_device *raid_device;
5312 u16 handle, parent_handle;
5313 u64 sas_address;
5314 unsigned long flags;
5315 int rc;
5316
5317 device = NULL;
5318 if (ioc->req_boot_device.device) {
5319 device = ioc->req_boot_device.device;
5320 is_raid = ioc->req_boot_device.is_raid;
5321 } else if (ioc->req_alt_boot_device.device) {
5322 device = ioc->req_alt_boot_device.device;
5323 is_raid = ioc->req_alt_boot_device.is_raid;
5324 } else if (ioc->current_boot_device.device) {
5325 device = ioc->current_boot_device.device;
5326 is_raid = ioc->current_boot_device.is_raid;
5327 }
5328
5329 if (!device)
5330 return;
5331
5332 if (is_raid) {
5333 raid_device = device;
5334 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
5335 raid_device->id, 0);
5336 if (rc)
5337 _scsih_raid_device_remove(ioc, raid_device);
5338 } else {
5339 sas_device = device;
5340 handle = sas_device->handle;
5341 parent_handle = sas_device->parent_handle;
5342 sas_address = sas_device->sas_address;
5343 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5344 list_move_tail(&sas_device->list, &ioc->sas_device_list);
5345 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5346 if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
5347 sas_device->parent_handle)) {
5348 _scsih_sas_device_remove(ioc, sas_device);
5349 } else if (!sas_device->starget) {
5350 mpt2sas_transport_port_remove(ioc, sas_address,
5351 parent_handle);
5352 _scsih_sas_device_remove(ioc, sas_device);
5353 }
5354 }
5355}
5356
5357/**
5358 * _scsih_probe_raid - reporting raid volumes to scsi-ml
5359 * @ioc: per adapter object
5360 *
5361 * Called during initial loading of the driver.
5362 */
5363static void
5364_scsih_probe_raid(struct MPT2SAS_ADAPTER *ioc)
5365{
5366 struct _raid_device *raid_device, *raid_next;
5367 int rc;
5368
5369 list_for_each_entry_safe(raid_device, raid_next,
5370 &ioc->raid_device_list, list) {
5371 if (raid_device->starget)
5372 continue;
5373 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
5374 raid_device->id, 0);
5375 if (rc)
5376 _scsih_raid_device_remove(ioc, raid_device);
5377 }
5378}
5379
/**
 * _scsih_probe_sas - reporting sas devices to sas transport
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.  Moves each entry from
 * the init list to the main sas device list, then registers it with
 * the sas transport layer; entries that fail to register are removed.
 */
static void
_scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device, *next;
	unsigned long flags;
	u16 handle, parent_handle;
	u64 sas_address;

	/* SAS Device List */
	list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
	    list) {
		/* list manipulation must be done under sas_device_lock */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		list_move_tail(&sas_device->list, &ioc->sas_device_list);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

		/* cache these fields now: presumably sas_device is freed by
		 * _scsih_sas_device_remove() on the failure paths below —
		 * TODO confirm against that helper's definition */
		handle = sas_device->handle;
		parent_handle = sas_device->parent_handle;
		sas_address = sas_device->sas_address;
		if (!mpt2sas_transport_port_add(ioc, handle, parent_handle)) {
			_scsih_sas_device_remove(ioc, sas_device);
		} else if (!sas_device->starget) {
			/* port was added but no scsi target attached:
			 * tear the port back down and drop the device */
			mpt2sas_transport_port_remove(ioc, sas_address,
			    parent_handle);
			_scsih_sas_device_remove(ioc, sas_device);
		}
	}
}
5413
5414/**
5415 * _scsih_probe_devices - probing for devices
5416 * @ioc: per adapter object
5417 *
5418 * Called during initial loading of the driver.
5419 */
5420static void
5421_scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
5422{
5423 u16 volume_mapping_flags =
5424 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
5425 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
5426
5427 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
5428 return; /* return when IOC doesn't support initiator mode */
5429
5430 _scsih_probe_boot_devices(ioc);
5431
5432 if (ioc->ir_firmware) {
5433 if ((volume_mapping_flags &
5434 MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING)) {
5435 _scsih_probe_sas(ioc);
5436 _scsih_probe_raid(ioc);
5437 } else {
5438 _scsih_probe_raid(ioc);
5439 _scsih_probe_sas(ioc);
5440 }
5441 } else
5442 _scsih_probe_sas(ioc);
5443}
5444
5445/**
5446 * scsih_probe - attach and add scsi host
5447 * @pdev: PCI device struct
5448 * @id: pci device id
5449 *
5450 * Returns 0 success, anything else error.
5451 */
5452static int
5453scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5454{
5455 struct MPT2SAS_ADAPTER *ioc;
5456 struct Scsi_Host *shost;
5457
5458 shost = scsi_host_alloc(&scsih_driver_template,
5459 sizeof(struct MPT2SAS_ADAPTER));
5460 if (!shost)
5461 return -ENODEV;
5462
5463 /* init local params */
5464 ioc = shost_priv(shost);
5465 memset(ioc, 0, sizeof(struct MPT2SAS_ADAPTER));
5466 INIT_LIST_HEAD(&ioc->list);
5467 list_add_tail(&ioc->list, &mpt2sas_ioc_list);
5468 ioc->shost = shost;
5469 ioc->id = mpt_ids++;
5470 sprintf(ioc->name, "%s%d", MPT2SAS_DRIVER_NAME, ioc->id);
5471 ioc->pdev = pdev;
5472 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
5473 ioc->tm_cb_idx = tm_cb_idx;
5474 ioc->ctl_cb_idx = ctl_cb_idx;
5475 ioc->base_cb_idx = base_cb_idx;
5476 ioc->transport_cb_idx = transport_cb_idx;
5477 ioc->config_cb_idx = config_cb_idx;
5478 ioc->logging_level = logging_level;
5479 /* misc semaphores and spin locks */
5480 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
5481 spin_lock_init(&ioc->scsi_lookup_lock);
5482 spin_lock_init(&ioc->sas_device_lock);
5483 spin_lock_init(&ioc->sas_node_lock);
5484 spin_lock_init(&ioc->fw_event_lock);
5485 spin_lock_init(&ioc->raid_device_lock);
5486
5487 INIT_LIST_HEAD(&ioc->sas_device_list);
5488 INIT_LIST_HEAD(&ioc->sas_device_init_list);
5489 INIT_LIST_HEAD(&ioc->sas_expander_list);
5490 INIT_LIST_HEAD(&ioc->fw_event_list);
5491 INIT_LIST_HEAD(&ioc->raid_device_list);
5492 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
5493
5494 /* init shost parameters */
5495 shost->max_cmd_len = 16;
5496 shost->max_lun = max_lun;
5497 shost->transportt = mpt2sas_transport_template;
5498 shost->unique_id = ioc->id;
5499
5500 if ((scsi_add_host(shost, &pdev->dev))) {
5501 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
5502 ioc->name, __FILE__, __LINE__, __func__);
5503 list_del(&ioc->list);
5504 goto out_add_shost_fail;
5505 }
5506
5507 /* event thread */
5508 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
5509 "fw_event%d", ioc->id);
5510 ioc->firmware_event_thread = create_singlethread_workqueue(
5511 ioc->firmware_event_name);
5512 if (!ioc->firmware_event_thread) {
5513 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
5514 ioc->name, __FILE__, __LINE__, __func__);
5515 goto out_thread_fail;
5516 }
5517
5518 ioc->wait_for_port_enable_to_complete = 1;
5519 if ((mpt2sas_base_attach(ioc))) {
5520 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
5521 ioc->name, __FILE__, __LINE__, __func__);
5522 goto out_attach_fail;
5523 }
5524
5525 ioc->wait_for_port_enable_to_complete = 0;
5526 _scsih_probe_devices(ioc);
5527 return 0;
5528
5529 out_attach_fail:
5530 destroy_workqueue(ioc->firmware_event_thread);
5531 out_thread_fail:
5532 list_del(&ioc->list);
5533 scsi_remove_host(shost);
5534 out_add_shost_fail:
5535 return -ENODEV;
5536}
5537
5538#ifdef CONFIG_PM
/**
 * scsih_suspend - power management suspend main entry point
 * @pdev: PCI device struct
 * @state: PM state change to (usually PCI_D3)
 *
 * Quiesces the adapter: drains pending work, blocks new scsi requests,
 * frees adapter resources, then powers the PCI device down.
 *
 * Returns 0 success, anything else error.
 */
static int
scsih_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
	u32 device_state;

	/* drain queued work and stop new commands before tearing
	 * down adapter resources */
	flush_scheduled_work();
	scsi_block_requests(shost);
	device_state = pci_choose_state(pdev, state);
	printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, entering "
	    "operating state [D%d]\n", ioc->name, pdev,
	    pci_name(pdev), device_state);

	mpt2sas_base_free_resources(ioc);
	/* save config space before disabling; restored in scsih_resume() */
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, device_state);
	return 0;
}
5566
/**
 * scsih_resume - power management resume main entry point
 * @pdev: PCI device struct
 *
 * Powers the PCI device back to D0, remaps adapter resources,
 * soft-resets the IOC and unblocks scsi requests.
 *
 * Returns 0 success, anything else error.
 */
static int
scsih_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
	u32 device_state = pdev->current_state;
	int r;

	printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, previous "
	    "operating state [D%d]\n", ioc->name, pdev,
	    pci_name(pdev), device_state);

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	ioc->pdev = pdev;
	r = mpt2sas_base_map_resources(ioc);
	if (r)
		return r;

	/* soft reset brings the IOC back to an operational state;
	 * only then is it safe to let commands flow again */
	mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET);
	scsi_unblock_requests(shost);
	return 0;
}
5597#endif /* CONFIG_PM */
5598
5599
/* PCI driver registration table; suspend/resume hooks are only
 * provided when power management is configured in */
static struct pci_driver scsih_driver = {
	.name		= MPT2SAS_DRIVER_NAME,
	.id_table	= scsih_pci_table,
	.probe		= scsih_probe,
	.remove		= __devexit_p(scsih_remove),
#ifdef CONFIG_PM
	.suspend	= scsih_suspend,
	.resume		= scsih_resume,
#endif
};
5610
5611
5612/**
5613 * scsih_init - main entry point for this driver.
5614 *
5615 * Returns 0 success, anything else error.
5616 */
5617static int __init
5618scsih_init(void)
5619{
5620 int error;
5621
5622 mpt_ids = 0;
5623 printk(KERN_INFO "%s version %s loaded\n", MPT2SAS_DRIVER_NAME,
5624 MPT2SAS_DRIVER_VERSION);
5625
5626 mpt2sas_transport_template =
5627 sas_attach_transport(&mpt2sas_transport_functions);
5628 if (!mpt2sas_transport_template)
5629 return -ENODEV;
5630
5631 mpt2sas_base_initialize_callback_handler();
5632
5633 /* queuecommand callback hander */
5634 scsi_io_cb_idx = mpt2sas_base_register_callback_handler(scsih_io_done);
5635
5636 /* task managment callback handler */
5637 tm_cb_idx = mpt2sas_base_register_callback_handler(scsih_tm_done);
5638
5639 /* base internal commands callback handler */
5640 base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done);
5641
5642 /* transport internal commands callback handler */
5643 transport_cb_idx = mpt2sas_base_register_callback_handler(
5644 mpt2sas_transport_done);
5645
5646 /* configuration page API internal commands callback handler */
5647 config_cb_idx = mpt2sas_base_register_callback_handler(
5648 mpt2sas_config_done);
5649
5650 /* ctl module callback handler */
5651 ctl_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_ctl_done);
5652
5653 mpt2sas_ctl_init();
5654
5655 error = pci_register_driver(&scsih_driver);
5656 if (error)
5657 sas_release_transport(mpt2sas_transport_template);
5658
5659 return error;
5660}
5661
/**
 * scsih_exit - exit point for this driver (when it is a module).
 *
 * Unregisters the pci driver, releases the sas transport template and
 * all callback handlers registered in scsih_init(), then shuts down
 * the ctl module.
 *
 * Return nothing.
 */
static void __exit
scsih_exit(void)
{
	printk(KERN_INFO "mpt2sas version %s unloading\n",
	    MPT2SAS_DRIVER_VERSION);

	pci_unregister_driver(&scsih_driver);

	sas_release_transport(mpt2sas_transport_template);
	/* release callback handlers registered in scsih_init() */
	mpt2sas_base_release_callback_handler(scsi_io_cb_idx);
	mpt2sas_base_release_callback_handler(tm_cb_idx);
	mpt2sas_base_release_callback_handler(base_cb_idx);
	mpt2sas_base_release_callback_handler(transport_cb_idx);
	mpt2sas_base_release_callback_handler(config_cb_idx);
	mpt2sas_base_release_callback_handler(ctl_cb_idx);

	mpt2sas_ctl_exit();
}

module_init(scsih_init);
module_exit(scsih_exit);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
new file mode 100644
index 000000000000..e03dc0b1e1a0
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -0,0 +1,1211 @@
1/*
2 * SAS Transport Layer for MPT (Message Passing Technology) based controllers
3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
5 * Copyright (C) 2007-2008 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include <linux/module.h>
45#include <linux/kernel.h>
46#include <linux/init.h>
47#include <linux/errno.h>
48#include <linux/sched.h>
49#include <linux/workqueue.h>
50#include <linux/delay.h>
51#include <linux/pci.h>
52
53#include <scsi/scsi.h>
54#include <scsi/scsi_cmnd.h>
55#include <scsi/scsi_device.h>
56#include <scsi/scsi_host.h>
57#include <scsi/scsi_transport_sas.h>
58#include <scsi/scsi_dbg.h>
59
60#include "mpt2sas_base.h"
61/**
62 * _transport_sas_node_find_by_handle - sas node search
63 * @ioc: per adapter object
64 * @handle: expander or hba handle (assigned by firmware)
65 * Context: Calling function should acquire ioc->sas_node_lock.
66 *
67 * Search for either hba phys or expander device based on handle, then returns
68 * the sas_node object.
69 */
70static struct _sas_node *
71_transport_sas_node_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
72{
73 int i;
74
75 for (i = 0; i < ioc->sas_hba.num_phys; i++)
76 if (ioc->sas_hba.phy[i].handle == handle)
77 return &ioc->sas_hba;
78
79 return mpt2sas_scsih_expander_find_by_handle(ioc, handle);
80}
81
82/**
83 * _transport_convert_phy_link_rate -
84 * @link_rate: link rate returned from mpt firmware
85 *
86 * Convert link_rate from mpi fusion into sas_transport form.
87 */
88static enum sas_linkrate
89_transport_convert_phy_link_rate(u8 link_rate)
90{
91 enum sas_linkrate rc;
92
93 switch (link_rate) {
94 case MPI2_SAS_NEG_LINK_RATE_1_5:
95 rc = SAS_LINK_RATE_1_5_GBPS;
96 break;
97 case MPI2_SAS_NEG_LINK_RATE_3_0:
98 rc = SAS_LINK_RATE_3_0_GBPS;
99 break;
100 case MPI2_SAS_NEG_LINK_RATE_6_0:
101 rc = SAS_LINK_RATE_6_0_GBPS;
102 break;
103 case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED:
104 rc = SAS_PHY_DISABLED;
105 break;
106 case MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED:
107 rc = SAS_LINK_RATE_FAILED;
108 break;
109 case MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR:
110 rc = SAS_SATA_PORT_SELECTOR;
111 break;
112 case MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS:
113 rc = SAS_PHY_RESET_IN_PROGRESS;
114 break;
115 default:
116 case MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE:
117 case MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE:
118 rc = SAS_LINK_RATE_UNKNOWN;
119 break;
120 }
121 return rc;
122}
123
124/**
125 * _transport_set_identify - set identify for phys and end devices
126 * @ioc: per adapter object
127 * @handle: device handle
128 * @identify: sas identify info
129 *
130 * Populates sas identify info.
131 *
132 * Returns 0 for success, non-zero for failure.
133 */
134static int
135_transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle,
136 struct sas_identify *identify)
137{
138 Mpi2SasDevicePage0_t sas_device_pg0;
139 Mpi2ConfigReply_t mpi_reply;
140 u32 device_info;
141 u32 ioc_status;
142
143 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
144 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
145 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
146 ioc->name, __FILE__, __LINE__, __func__);
147 return -1;
148 }
149
150 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
151 MPI2_IOCSTATUS_MASK;
152 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
153 printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)"
154 "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
155 __FILE__, __LINE__, __func__);
156 return -1;
157 }
158
159 memset(identify, 0, sizeof(identify));
160 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
161
162 /* sas_address */
163 identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
164
165 /* device_type */
166 switch (device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
167 case MPI2_SAS_DEVICE_INFO_NO_DEVICE:
168 identify->device_type = SAS_PHY_UNUSED;
169 break;
170 case MPI2_SAS_DEVICE_INFO_END_DEVICE:
171 identify->device_type = SAS_END_DEVICE;
172 break;
173 case MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER:
174 identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
175 break;
176 case MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER:
177 identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
178 break;
179 }
180
181 /* initiator_port_protocols */
182 if (device_info & MPI2_SAS_DEVICE_INFO_SSP_INITIATOR)
183 identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
184 if (device_info & MPI2_SAS_DEVICE_INFO_STP_INITIATOR)
185 identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
186 if (device_info & MPI2_SAS_DEVICE_INFO_SMP_INITIATOR)
187 identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
188 if (device_info & MPI2_SAS_DEVICE_INFO_SATA_HOST)
189 identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;
190
191 /* target_port_protocols */
192 if (device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
193 identify->target_port_protocols |= SAS_PROTOCOL_SSP;
194 if (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
195 identify->target_port_protocols |= SAS_PROTOCOL_STP;
196 if (device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET)
197 identify->target_port_protocols |= SAS_PROTOCOL_SMP;
198 if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
199 identify->target_port_protocols |= SAS_PROTOCOL_SATA;
200
201 return 0;
202}
203
204/**
205 * mpt2sas_transport_done - internal transport layer callback handler.
206 * @ioc: per adapter object
207 * @smid: system request message index
208 * @VF_ID: virtual function id
209 * @reply: reply message frame(lower 32bit addr)
210 *
211 * Callback handler when sending internal generated transport cmds.
212 * The callback index passed is `ioc->transport_cb_idx`
213 *
214 * Return nothing.
215 */
216void
217mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID,
218 u32 reply)
219{
220 MPI2DefaultReply_t *mpi_reply;
221
222 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
223 if (ioc->transport_cmds.status == MPT2_CMD_NOT_USED)
224 return;
225 if (ioc->transport_cmds.smid != smid)
226 return;
227 ioc->transport_cmds.status |= MPT2_CMD_COMPLETE;
228 if (mpi_reply) {
229 memcpy(ioc->transport_cmds.reply, mpi_reply,
230 mpi_reply->MsgLength*4);
231 ioc->transport_cmds.status |= MPT2_CMD_REPLY_VALID;
232 }
233 ioc->transport_cmds.status &= ~MPT2_CMD_PENDING;
234 complete(&ioc->transport_cmds.done);
235}
236
/* report manufacture request structure;
 * filled with frame type 0x40, function 0x01 before being sent via
 * SMP passthrough (see transport_expander_report_manufacture) */
struct rep_manu_request{
	u8 smp_frame_type;	/* 0x40 */
	u8 function;		/* 0x01 */
	u8 reserved;
	u8 request_length;
};
244
/* report manufacture reply structure; layout must match the SMP
 * REPORT MANUFACTURER INFORMATION response returned by the expander */
struct rep_manu_reply{
	u8 smp_frame_type; /* 0x41 */
	u8 function; /* 0x01 */
	u8 function_result;
	u8 response_length;
	u16 expander_change_count;
	u8 reserved0[2];
	u8 sas_format:1;	/* set when SAS-1.1 format fields are valid */
	u8 reserved1:7;
	u8 reserved2[3];
	u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
	u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
	u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
	u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
	u16 component_id;	/* big-endian on the wire; byte-swapped by caller */
	u8 component_revision_id;
	u8 reserved3;
	u8 vendor_specific[8];
};
265
266/**
267 * transport_expander_report_manufacture - obtain SMP report_manufacture
268 * @ioc: per adapter object
269 * @sas_address: expander sas address
270 * @edev: the sas_expander_device object
271 *
272 * Fills in the sas_expander_device object when SMP port is created.
273 *
274 * Returns 0 for success, non-zero for failure.
275 */
276static int
277transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
278 u64 sas_address, struct sas_expander_device *edev)
279{
280 Mpi2SmpPassthroughRequest_t *mpi_request;
281 Mpi2SmpPassthroughReply_t *mpi_reply;
282 struct rep_manu_reply *manufacture_reply;
283 struct rep_manu_request *manufacture_request;
284 int rc;
285 u16 smid;
286 u32 ioc_state;
287 unsigned long timeleft;
288 void *psge;
289 u32 sgl_flags;
290 u8 issue_reset = 0;
291 unsigned long flags;
292 void *data_out = NULL;
293 dma_addr_t data_out_dma;
294 u32 sz;
295 u64 *sas_address_le;
296 u16 wait_state_count;
297
298 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
299 if (ioc->ioc_reset_in_progress) {
300 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
301 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
302 __func__, ioc->name);
303 return -EFAULT;
304 }
305 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
306
307 mutex_lock(&ioc->transport_cmds.mutex);
308
309 if (ioc->transport_cmds.status != MPT2_CMD_NOT_USED) {
310 printk(MPT2SAS_ERR_FMT "%s: transport_cmds in use\n",
311 ioc->name, __func__);
312 rc = -EAGAIN;
313 goto out;
314 }
315 ioc->transport_cmds.status = MPT2_CMD_PENDING;
316
317 wait_state_count = 0;
318 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
319 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
320 if (wait_state_count++ == 10) {
321 printk(MPT2SAS_ERR_FMT
322 "%s: failed due to ioc not operational\n",
323 ioc->name, __func__);
324 rc = -EFAULT;
325 goto out;
326 }
327 ssleep(1);
328 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
329 printk(MPT2SAS_INFO_FMT "%s: waiting for "
330 "operational state(count=%d)\n", ioc->name,
331 __func__, wait_state_count);
332 }
333 if (wait_state_count)
334 printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
335 ioc->name, __func__);
336
337 smid = mpt2sas_base_get_smid(ioc, ioc->transport_cb_idx);
338 if (!smid) {
339 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
340 ioc->name, __func__);
341 rc = -EAGAIN;
342 goto out;
343 }
344
345 rc = 0;
346 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
347 ioc->transport_cmds.smid = smid;
348
349 sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply);
350 data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
351
352 if (!data_out) {
353 printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
354 __LINE__, __func__);
355 rc = -ENOMEM;
356 mpt2sas_base_free_smid(ioc, smid);
357 goto out;
358 }
359
360 manufacture_request = data_out;
361 manufacture_request->smp_frame_type = 0x40;
362 manufacture_request->function = 1;
363 manufacture_request->reserved = 0;
364 manufacture_request->request_length = 0;
365
366 memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
367 mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
368 mpi_request->PhysicalPort = 0xFF;
369 sas_address_le = (u64 *)&mpi_request->SASAddress;
370 *sas_address_le = cpu_to_le64(sas_address);
371 mpi_request->RequestDataLength = sizeof(struct rep_manu_request);
372 psge = &mpi_request->SGL;
373
374 /* WRITE sgel first */
375 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
376 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
377 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
378 ioc->base_add_sg_single(psge, sgl_flags |
379 sizeof(struct rep_manu_request), data_out_dma);
380
381 /* incr sgel */
382 psge += ioc->sge_size;
383
384 /* READ sgel last */
385 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
386 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
387 MPI2_SGE_FLAGS_END_OF_LIST);
388 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
389 ioc->base_add_sg_single(psge, sgl_flags |
390 sizeof(struct rep_manu_reply), data_out_dma +
391 sizeof(struct rep_manu_request));
392
393 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "report_manufacture - "
394 "send to sas_addr(0x%016llx)\n", ioc->name,
395 (unsigned long long)sas_address));
396 mpt2sas_base_put_smid_default(ioc, smid, 0 /* VF_ID */);
397 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
398 10*HZ);
399
400 if (!(ioc->transport_cmds.status & MPT2_CMD_COMPLETE)) {
401 printk(MPT2SAS_ERR_FMT "%s: timeout\n",
402 ioc->name, __func__);
403 _debug_dump_mf(mpi_request,
404 sizeof(Mpi2SmpPassthroughRequest_t)/4);
405 if (!(ioc->transport_cmds.status & MPT2_CMD_RESET))
406 issue_reset = 1;
407 goto issue_host_reset;
408 }
409
410 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "report_manufacture - "
411 "complete\n", ioc->name));
412
413 if (ioc->transport_cmds.status & MPT2_CMD_REPLY_VALID) {
414 u8 *tmp;
415
416 mpi_reply = ioc->transport_cmds.reply;
417
418 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
419 "report_manufacture - reply data transfer size(%d)\n",
420 ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
421
422 if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
423 sizeof(struct rep_manu_reply))
424 goto out;
425
426 manufacture_reply = data_out + sizeof(struct rep_manu_request);
427 strncpy(edev->vendor_id, manufacture_reply->vendor_id,
428 SAS_EXPANDER_VENDOR_ID_LEN);
429 strncpy(edev->product_id, manufacture_reply->product_id,
430 SAS_EXPANDER_PRODUCT_ID_LEN);
431 strncpy(edev->product_rev, manufacture_reply->product_rev,
432 SAS_EXPANDER_PRODUCT_REV_LEN);
433 edev->level = manufacture_reply->sas_format;
434 if (manufacture_reply->sas_format) {
435 strncpy(edev->component_vendor_id,
436 manufacture_reply->component_vendor_id,
437 SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
438 tmp = (u8 *)&manufacture_reply->component_id;
439 edev->component_id = tmp[0] << 8 | tmp[1];
440 edev->component_revision_id =
441 manufacture_reply->component_revision_id;
442 }
443 } else
444 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
445 "report_manufacture - no reply\n", ioc->name));
446
447 issue_host_reset:
448 if (issue_reset)
449 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
450 FORCE_BIG_HAMMER);
451 out:
452 ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
453 if (data_out)
454 pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
455
456 mutex_unlock(&ioc->transport_cmds.mutex);
457 return rc;
458}
459
460/**
461 * mpt2sas_transport_port_add - insert port to the list
462 * @ioc: per adapter object
463 * @handle: handle of attached device
464 * @parent_handle: parent handle(either hba or expander)
465 * Context: This function will acquire ioc->sas_node_lock.
466 *
467 * Adding new port object to the sas_node->sas_port_list.
468 *
469 * Returns mpt2sas_port.
470 */
471struct _sas_port *
472mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
473 u16 parent_handle)
474{
475 struct _sas_phy *mpt2sas_phy, *next;
476 struct _sas_port *mpt2sas_port;
477 unsigned long flags;
478 struct _sas_node *sas_node;
479 struct sas_rphy *rphy;
480 int i;
481 struct sas_port *port;
482
483 if (!parent_handle)
484 return NULL;
485
486 mpt2sas_port = kzalloc(sizeof(struct _sas_port),
487 GFP_KERNEL);
488 if (!mpt2sas_port) {
489 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
490 ioc->name, __FILE__, __LINE__, __func__);
491 return NULL;
492 }
493
494 INIT_LIST_HEAD(&mpt2sas_port->port_list);
495 INIT_LIST_HEAD(&mpt2sas_port->phy_list);
496 spin_lock_irqsave(&ioc->sas_node_lock, flags);
497 sas_node = _transport_sas_node_find_by_handle(ioc, parent_handle);
498 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
499
500 if (!sas_node) {
501 printk(MPT2SAS_ERR_FMT "%s: Could not find parent(0x%04x)!\n",
502 ioc->name, __func__, parent_handle);
503 goto out_fail;
504 }
505
506 mpt2sas_port->handle = parent_handle;
507 mpt2sas_port->sas_address = sas_node->sas_address;
508 if ((_transport_set_identify(ioc, handle,
509 &mpt2sas_port->remote_identify))) {
510 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
511 ioc->name, __FILE__, __LINE__, __func__);
512 goto out_fail;
513 }
514
515 if (mpt2sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
516 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
517 ioc->name, __FILE__, __LINE__, __func__);
518 goto out_fail;
519 }
520
521 for (i = 0; i < sas_node->num_phys; i++) {
522 if (sas_node->phy[i].remote_identify.sas_address !=
523 mpt2sas_port->remote_identify.sas_address)
524 continue;
525 list_add_tail(&sas_node->phy[i].port_siblings,
526 &mpt2sas_port->phy_list);
527 mpt2sas_port->num_phys++;
528 }
529
530 if (!mpt2sas_port->num_phys) {
531 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
532 ioc->name, __FILE__, __LINE__, __func__);
533 goto out_fail;
534 }
535
536 port = sas_port_alloc_num(sas_node->parent_dev);
537 if ((sas_port_add(port))) {
538 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
539 ioc->name, __FILE__, __LINE__, __func__);
540 goto out_fail;
541 }
542
543 list_for_each_entry(mpt2sas_phy, &mpt2sas_port->phy_list,
544 port_siblings) {
545 if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
546 dev_printk(KERN_INFO, &port->dev, "add: handle(0x%04x)"
547 ", sas_addr(0x%016llx), phy(%d)\n", handle,
548 (unsigned long long)
549 mpt2sas_port->remote_identify.sas_address,
550 mpt2sas_phy->phy_id);
551 sas_port_add_phy(port, mpt2sas_phy->phy);
552 }
553
554 mpt2sas_port->port = port;
555 if (mpt2sas_port->remote_identify.device_type == SAS_END_DEVICE)
556 rphy = sas_end_device_alloc(port);
557 else
558 rphy = sas_expander_alloc(port,
559 mpt2sas_port->remote_identify.device_type);
560
561 rphy->identify = mpt2sas_port->remote_identify;
562 if ((sas_rphy_add(rphy))) {
563 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
564 ioc->name, __FILE__, __LINE__, __func__);
565 }
566 if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
567 dev_printk(KERN_INFO, &rphy->dev, "add: handle(0x%04x), "
568 "sas_addr(0x%016llx)\n", handle,
569 (unsigned long long)
570 mpt2sas_port->remote_identify.sas_address);
571 mpt2sas_port->rphy = rphy;
572 spin_lock_irqsave(&ioc->sas_node_lock, flags);
573 list_add_tail(&mpt2sas_port->port_list, &sas_node->sas_port_list);
574 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
575
576 /* fill in report manufacture */
577 if (mpt2sas_port->remote_identify.device_type ==
578 MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
579 mpt2sas_port->remote_identify.device_type ==
580 MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
581 transport_expander_report_manufacture(ioc,
582 mpt2sas_port->remote_identify.sas_address,
583 rphy_to_expander_device(rphy));
584
585 return mpt2sas_port;
586
587 out_fail:
588 list_for_each_entry_safe(mpt2sas_phy, next, &mpt2sas_port->phy_list,
589 port_siblings)
590 list_del(&mpt2sas_phy->port_siblings);
591 kfree(mpt2sas_port);
592 return NULL;
593}
594
595/**
596 * mpt2sas_transport_port_remove - remove port from the list
597 * @ioc: per adapter object
598 * @sas_address: sas address of attached device
599 * @parent_handle: handle to the upstream parent(either hba or expander)
600 * Context: This function will acquire ioc->sas_node_lock.
601 *
602 * Removing object and freeing associated memory from the
603 * ioc->sas_port_list.
604 *
605 * Return nothing.
606 */
607void
608mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
609 u16 parent_handle)
610{
611 int i;
612 unsigned long flags;
613 struct _sas_port *mpt2sas_port, *next;
614 struct _sas_node *sas_node;
615 u8 found = 0;
616 struct _sas_phy *mpt2sas_phy, *next_phy;
617
618 spin_lock_irqsave(&ioc->sas_node_lock, flags);
619 sas_node = _transport_sas_node_find_by_handle(ioc, parent_handle);
620 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
621 if (!sas_node)
622 return;
623 list_for_each_entry_safe(mpt2sas_port, next, &sas_node->sas_port_list,
624 port_list) {
625 if (mpt2sas_port->remote_identify.sas_address != sas_address)
626 continue;
627 found = 1;
628 list_del(&mpt2sas_port->port_list);
629 goto out;
630 }
631 out:
632 if (!found)
633 return;
634
635 for (i = 0; i < sas_node->num_phys; i++) {
636 if (sas_node->phy[i].remote_identify.sas_address == sas_address)
637 memset(&sas_node->phy[i].remote_identify, 0 ,
638 sizeof(struct sas_identify));
639 }
640
641 list_for_each_entry_safe(mpt2sas_phy, next_phy,
642 &mpt2sas_port->phy_list, port_siblings) {
643 if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
644 dev_printk(KERN_INFO, &mpt2sas_port->port->dev,
645 "remove: parent_handle(0x%04x), "
646 "sas_addr(0x%016llx), phy(%d)\n", parent_handle,
647 (unsigned long long)
648 mpt2sas_port->remote_identify.sas_address,
649 mpt2sas_phy->phy_id);
650 sas_port_delete_phy(mpt2sas_port->port, mpt2sas_phy->phy);
651 list_del(&mpt2sas_phy->port_siblings);
652 }
653 sas_port_delete(mpt2sas_port->port);
654 kfree(mpt2sas_port);
655}
656
657/**
658 * mpt2sas_transport_add_host_phy - report sas_host phy to transport
659 * @ioc: per adapter object
660 * @mpt2sas_phy: mpt2sas per phy object
661 * @phy_pg0: sas phy page 0
662 * @parent_dev: parent device class object
663 *
664 * Returns 0 for success, non-zero for failure.
665 */
666int
667mpt2sas_transport_add_host_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
668 *mpt2sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev)
669{
670 struct sas_phy *phy;
671 int phy_index = mpt2sas_phy->phy_id;
672
673
674 INIT_LIST_HEAD(&mpt2sas_phy->port_siblings);
675 phy = sas_phy_alloc(parent_dev, phy_index);
676 if (!phy) {
677 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
678 ioc->name, __FILE__, __LINE__, __func__);
679 return -1;
680 }
681 if ((_transport_set_identify(ioc, mpt2sas_phy->handle,
682 &mpt2sas_phy->identify))) {
683 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
684 ioc->name, __FILE__, __LINE__, __func__);
685 return -1;
686 }
687 phy->identify = mpt2sas_phy->identify;
688 mpt2sas_phy->attached_handle = le16_to_cpu(phy_pg0.AttachedDevHandle);
689 if (mpt2sas_phy->attached_handle)
690 _transport_set_identify(ioc, mpt2sas_phy->attached_handle,
691 &mpt2sas_phy->remote_identify);
692 phy->identify.phy_identifier = mpt2sas_phy->phy_id;
693 phy->negotiated_linkrate = _transport_convert_phy_link_rate(
694 phy_pg0.NegotiatedLinkRate & MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
695 phy->minimum_linkrate_hw = _transport_convert_phy_link_rate(
696 phy_pg0.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK);
697 phy->maximum_linkrate_hw = _transport_convert_phy_link_rate(
698 phy_pg0.HwLinkRate >> 4);
699 phy->minimum_linkrate = _transport_convert_phy_link_rate(
700 phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
701 phy->maximum_linkrate = _transport_convert_phy_link_rate(
702 phy_pg0.ProgrammedLinkRate >> 4);
703
704 if ((sas_phy_add(phy))) {
705 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
706 ioc->name, __FILE__, __LINE__, __func__);
707 sas_phy_free(phy);
708 return -1;
709 }
710 if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
711 dev_printk(KERN_INFO, &phy->dev,
712 "add: handle(0x%04x), sas_addr(0x%016llx)\n"
713 "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
714 mpt2sas_phy->handle, (unsigned long long)
715 mpt2sas_phy->identify.sas_address,
716 mpt2sas_phy->attached_handle,
717 (unsigned long long)
718 mpt2sas_phy->remote_identify.sas_address);
719 mpt2sas_phy->phy = phy;
720 return 0;
721}
722
723
724/**
725 * mpt2sas_transport_add_expander_phy - report expander phy to transport
726 * @ioc: per adapter object
727 * @mpt2sas_phy: mpt2sas per phy object
728 * @expander_pg1: expander page 1
729 * @parent_dev: parent device class object
730 *
731 * Returns 0 for success, non-zero for failure.
732 */
733int
734mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
735 *mpt2sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev)
736{
737 struct sas_phy *phy;
738 int phy_index = mpt2sas_phy->phy_id;
739
740 INIT_LIST_HEAD(&mpt2sas_phy->port_siblings);
741 phy = sas_phy_alloc(parent_dev, phy_index);
742 if (!phy) {
743 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
744 ioc->name, __FILE__, __LINE__, __func__);
745 return -1;
746 }
747 if ((_transport_set_identify(ioc, mpt2sas_phy->handle,
748 &mpt2sas_phy->identify))) {
749 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
750 ioc->name, __FILE__, __LINE__, __func__);
751 return -1;
752 }
753 phy->identify = mpt2sas_phy->identify;
754 mpt2sas_phy->attached_handle =
755 le16_to_cpu(expander_pg1.AttachedDevHandle);
756 if (mpt2sas_phy->attached_handle)
757 _transport_set_identify(ioc, mpt2sas_phy->attached_handle,
758 &mpt2sas_phy->remote_identify);
759 phy->identify.phy_identifier = mpt2sas_phy->phy_id;
760 phy->negotiated_linkrate = _transport_convert_phy_link_rate(
761 expander_pg1.NegotiatedLinkRate &
762 MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
763 phy->minimum_linkrate_hw = _transport_convert_phy_link_rate(
764 expander_pg1.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK);
765 phy->maximum_linkrate_hw = _transport_convert_phy_link_rate(
766 expander_pg1.HwLinkRate >> 4);
767 phy->minimum_linkrate = _transport_convert_phy_link_rate(
768 expander_pg1.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
769 phy->maximum_linkrate = _transport_convert_phy_link_rate(
770 expander_pg1.ProgrammedLinkRate >> 4);
771
772 if ((sas_phy_add(phy))) {
773 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
774 ioc->name, __FILE__, __LINE__, __func__);
775 sas_phy_free(phy);
776 return -1;
777 }
778 if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
779 dev_printk(KERN_INFO, &phy->dev,
780 "add: handle(0x%04x), sas_addr(0x%016llx)\n"
781 "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
782 mpt2sas_phy->handle, (unsigned long long)
783 mpt2sas_phy->identify.sas_address,
784 mpt2sas_phy->attached_handle,
785 (unsigned long long)
786 mpt2sas_phy->remote_identify.sas_address);
787 mpt2sas_phy->phy = phy;
788 return 0;
789}
790
791/**
792 * mpt2sas_transport_update_phy_link_change - refreshing phy link changes and attached devices
793 * @ioc: per adapter object
794 * @handle: handle to sas_host or expander
795 * @attached_handle: attached device handle
796 * @phy_numberv: phy number
797 * @link_rate: new link rate
798 *
799 * Returns nothing.
800 */
801void
802mpt2sas_transport_update_phy_link_change(struct MPT2SAS_ADAPTER *ioc,
803 u16 handle, u16 attached_handle, u8 phy_number, u8 link_rate)
804{
805 unsigned long flags;
806 struct _sas_node *sas_node;
807 struct _sas_phy *mpt2sas_phy;
808
809 spin_lock_irqsave(&ioc->sas_node_lock, flags);
810 sas_node = _transport_sas_node_find_by_handle(ioc, handle);
811 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
812 if (!sas_node)
813 return;
814
815 mpt2sas_phy = &sas_node->phy[phy_number];
816 mpt2sas_phy->attached_handle = attached_handle;
817 if (attached_handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5))
818 _transport_set_identify(ioc, mpt2sas_phy->attached_handle,
819 &mpt2sas_phy->remote_identify);
820 else
821 memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
822 sas_identify));
823
824 if (mpt2sas_phy->phy)
825 mpt2sas_phy->phy->negotiated_linkrate =
826 _transport_convert_phy_link_rate(link_rate);
827
828 if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
829 dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev,
830 "refresh: handle(0x%04x), sas_addr(0x%016llx),\n"
831 "\tlink_rate(0x%02x), phy(%d)\n"
832 "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
833 handle, (unsigned long long)
834 mpt2sas_phy->identify.sas_address, link_rate,
835 phy_number, attached_handle,
836 (unsigned long long)
837 mpt2sas_phy->remote_identify.sas_address);
838}
839
840static inline void *
841phy_to_ioc(struct sas_phy *phy)
842{
843 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
844 return shost_priv(shost);
845}
846
847static inline void *
848rphy_to_ioc(struct sas_rphy *rphy)
849{
850 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
851 return shost_priv(shost);
852}
853
854/**
855 * transport_get_linkerrors -
856 * @phy: The sas phy object
857 *
858 * Only support sas_host direct attached phys.
859 * Returns 0 for success, non-zero for failure.
860 *
861 */
862static int
863transport_get_linkerrors(struct sas_phy *phy)
864{
865 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
866 struct _sas_phy *mpt2sas_phy;
867 Mpi2ConfigReply_t mpi_reply;
868 Mpi2SasPhyPage1_t phy_pg1;
869 int i;
870
871 for (i = 0, mpt2sas_phy = NULL; i < ioc->sas_hba.num_phys &&
872 !mpt2sas_phy; i++) {
873 if (ioc->sas_hba.phy[i].phy != phy)
874 continue;
875 mpt2sas_phy = &ioc->sas_hba.phy[i];
876 }
877
878 if (!mpt2sas_phy) /* this phy not on sas_host */
879 return -EINVAL;
880
881 if ((mpt2sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1,
882 mpt2sas_phy->phy_id))) {
883 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
884 ioc->name, __FILE__, __LINE__, __func__);
885 return -ENXIO;
886 }
887
888 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
889 printk(MPT2SAS_INFO_FMT "phy(%d), ioc_status"
890 "(0x%04x), loginfo(0x%08x)\n", ioc->name,
891 mpt2sas_phy->phy_id,
892 le16_to_cpu(mpi_reply.IOCStatus),
893 le32_to_cpu(mpi_reply.IOCLogInfo));
894
895 phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount);
896 phy->running_disparity_error_count =
897 le32_to_cpu(phy_pg1.RunningDisparityErrorCount);
898 phy->loss_of_dword_sync_count =
899 le32_to_cpu(phy_pg1.LossDwordSynchCount);
900 phy->phy_reset_problem_count =
901 le32_to_cpu(phy_pg1.PhyResetProblemCount);
902 return 0;
903}
904
/**
 * transport_get_enclosure_identifier -
 * @rphy: The sas transport rphy object
 * @identifier: out parameter receiving the enclosure logical id
 *
 * Obtain the enclosure logical id for an expander.
 * Returns 0 for success, -ENXIO when no matching expander is found.
 */
static int
transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
	struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
	struct _sas_node *sas_expander;
	unsigned long flags;

	/* expander list is protected by sas_node_lock */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc,
	    rphy->identify.sas_address);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	if (!sas_expander)
		return -ENXIO;

	*identifier = sas_expander->enclosure_logical_id;
	return 0;
}
930
/**
 * transport_get_bay_identifier -
 * @rphy: The sas transport rphy object
 *
 * Returns the slot id for a device that resides inside an enclosure,
 * or -ENXIO when no matching device is found.
 */
static int
transport_get_bay_identifier(struct sas_rphy *rphy)
{
	struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
	struct _sas_device *sas_device;
	unsigned long flags;

	/* device list is protected by sas_device_lock */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
	    rphy->identify.sas_address);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (!sas_device)
		return -ENXIO;

	return sas_device->slot;
}
954
955/**
956 * transport_phy_reset -
957 * @phy: The sas phy object
958 * @hard_reset:
959 *
960 * Only support sas_host direct attached phys.
961 * Returns 0 for success, non-zero for failure.
962 */
963static int
964transport_phy_reset(struct sas_phy *phy, int hard_reset)
965{
966 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
967 struct _sas_phy *mpt2sas_phy;
968 Mpi2SasIoUnitControlReply_t mpi_reply;
969 Mpi2SasIoUnitControlRequest_t mpi_request;
970 int i;
971
972 for (i = 0, mpt2sas_phy = NULL; i < ioc->sas_hba.num_phys &&
973 !mpt2sas_phy; i++) {
974 if (ioc->sas_hba.phy[i].phy != phy)
975 continue;
976 mpt2sas_phy = &ioc->sas_hba.phy[i];
977 }
978
979 if (!mpt2sas_phy) /* this phy not on sas_host */
980 return -EINVAL;
981
982 memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlReply_t));
983 mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
984 mpi_request.Operation = hard_reset ?
985 MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET;
986 mpi_request.PhyNum = mpt2sas_phy->phy_id;
987
988 if ((mpt2sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
989 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
990 ioc->name, __FILE__, __LINE__, __func__);
991 return -ENXIO;
992 }
993
994 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
995 printk(MPT2SAS_INFO_FMT "phy(%d), ioc_status"
996 "(0x%04x), loginfo(0x%08x)\n", ioc->name,
997 mpt2sas_phy->phy_id,
998 le16_to_cpu(mpi_reply.IOCStatus),
999 le32_to_cpu(mpi_reply.IOCLogInfo));
1000
1001 return 0;
1002}
1003
1004/**
1005 * transport_smp_handler - transport portal for smp passthru
1006 * @shost: shost object
1007 * @rphy: sas transport rphy object
1008 * @req:
1009 *
1010 * This used primarily for smp_utils.
1011 * Example:
1012 * smp_rep_general /sys/class/bsg/expander-5:0
1013 */
1014static int
1015transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1016 struct request *req)
1017{
1018 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
1019 Mpi2SmpPassthroughRequest_t *mpi_request;
1020 Mpi2SmpPassthroughReply_t *mpi_reply;
1021 int rc;
1022 u16 smid;
1023 u32 ioc_state;
1024 unsigned long timeleft;
1025 void *psge;
1026 u32 sgl_flags;
1027 u8 issue_reset = 0;
1028 unsigned long flags;
1029 dma_addr_t dma_addr_in = 0;
1030 dma_addr_t dma_addr_out = 0;
1031 u16 wait_state_count;
1032 struct request *rsp = req->next_rq;
1033
1034 if (!rsp) {
1035 printk(MPT2SAS_ERR_FMT "%s: the smp response space is "
1036 "missing\n", ioc->name, __func__);
1037 return -EINVAL;
1038 }
1039
1040 /* do we need to support multiple segments? */
1041 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1042 printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, "
1043 "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt,
1044 req->data_len, rsp->bio->bi_vcnt, rsp->data_len);
1045 return -EINVAL;
1046 }
1047
1048 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1049 if (ioc->ioc_reset_in_progress) {
1050 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1051 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
1052 __func__, ioc->name);
1053 return -EFAULT;
1054 }
1055 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1056
1057 rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
1058 if (rc)
1059 return rc;
1060
1061 if (ioc->transport_cmds.status != MPT2_CMD_NOT_USED) {
1062 printk(MPT2SAS_ERR_FMT "%s: transport_cmds in use\n", ioc->name,
1063 __func__);
1064 rc = -EAGAIN;
1065 goto out;
1066 }
1067 ioc->transport_cmds.status = MPT2_CMD_PENDING;
1068
1069 wait_state_count = 0;
1070 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
1071 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1072 if (wait_state_count++ == 10) {
1073 printk(MPT2SAS_ERR_FMT
1074 "%s: failed due to ioc not operational\n",
1075 ioc->name, __func__);
1076 rc = -EFAULT;
1077 goto out;
1078 }
1079 ssleep(1);
1080 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
1081 printk(MPT2SAS_INFO_FMT "%s: waiting for "
1082 "operational state(count=%d)\n", ioc->name,
1083 __func__, wait_state_count);
1084 }
1085 if (wait_state_count)
1086 printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
1087 ioc->name, __func__);
1088
1089 smid = mpt2sas_base_get_smid(ioc, ioc->transport_cb_idx);
1090 if (!smid) {
1091 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
1092 ioc->name, __func__);
1093 rc = -EAGAIN;
1094 goto out;
1095 }
1096
1097 rc = 0;
1098 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
1099 ioc->transport_cmds.smid = smid;
1100
1101 memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
1102 mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
1103 mpi_request->PhysicalPort = 0xFF;
1104 *((u64 *)&mpi_request->SASAddress) = (rphy) ?
1105 cpu_to_le64(rphy->identify.sas_address) :
1106 cpu_to_le64(ioc->sas_hba.sas_address);
1107 mpi_request->RequestDataLength = cpu_to_le16(req->data_len - 4);
1108 psge = &mpi_request->SGL;
1109
1110 /* WRITE sgel first */
1111 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1112 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1113 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1114 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
1115 req->data_len, PCI_DMA_BIDIRECTIONAL);
1116 if (!dma_addr_out) {
1117 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
1118 goto unmap;
1119 }
1120
1121 ioc->base_add_sg_single(psge, sgl_flags | (req->data_len - 4),
1122 dma_addr_out);
1123
1124 /* incr sgel */
1125 psge += ioc->sge_size;
1126
1127 /* READ sgel last */
1128 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1129 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1130 MPI2_SGE_FLAGS_END_OF_LIST);
1131 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1132 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
1133 rsp->data_len, PCI_DMA_BIDIRECTIONAL);
1134 if (!dma_addr_in) {
1135 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
1136 goto unmap;
1137 }
1138
1139 ioc->base_add_sg_single(psge, sgl_flags | (rsp->data_len + 4),
1140 dma_addr_in);
1141
1142 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - "
1143 "sending smp request\n", ioc->name, __func__));
1144
1145 mpt2sas_base_put_smid_default(ioc, smid, 0 /* VF_ID */);
1146 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
1147 10*HZ);
1148
1149 if (!(ioc->transport_cmds.status & MPT2_CMD_COMPLETE)) {
1150 printk(MPT2SAS_ERR_FMT "%s : timeout\n",
1151 __func__, ioc->name);
1152 _debug_dump_mf(mpi_request,
1153 sizeof(Mpi2SmpPassthroughRequest_t)/4);
1154 if (!(ioc->transport_cmds.status & MPT2_CMD_RESET))
1155 issue_reset = 1;
1156 goto issue_host_reset;
1157 }
1158
1159 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - "
1160 "complete\n", ioc->name, __func__));
1161
1162 if (ioc->transport_cmds.status & MPT2_CMD_REPLY_VALID) {
1163
1164 mpi_reply = ioc->transport_cmds.reply;
1165
1166 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
1167 "%s - reply data transfer size(%d)\n",
1168 ioc->name, __func__,
1169 le16_to_cpu(mpi_reply->ResponseDataLength)));
1170
1171 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
1172 req->sense_len = sizeof(*mpi_reply);
1173 req->data_len = 0;
1174 rsp->data_len -= mpi_reply->ResponseDataLength;
1175
1176 } else {
1177 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
1178 "%s - no reply\n", ioc->name, __func__));
1179 rc = -ENXIO;
1180 }
1181
1182 issue_host_reset:
1183 if (issue_reset) {
1184 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
1185 FORCE_BIG_HAMMER);
1186 rc = -ETIMEDOUT;
1187 }
1188
1189 unmap:
1190 if (dma_addr_out)
1191 pci_unmap_single(ioc->pdev, dma_addr_out, req->data_len,
1192 PCI_DMA_BIDIRECTIONAL);
1193 if (dma_addr_in)
1194 pci_unmap_single(ioc->pdev, dma_addr_in, rsp->data_len,
1195 PCI_DMA_BIDIRECTIONAL);
1196
1197 out:
1198 ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
1199 mutex_unlock(&ioc->transport_cmds.mutex);
1200 return rc;
1201}
1202
/*
 * Callbacks exported to the SAS transport class.  NOTE(review): presumably
 * passed to sas_attach_transport() in the driver init path to create
 * mpt2sas_transport_template below — confirm against mpt2sas_scsih.c.
 */
struct sas_function_template mpt2sas_transport_functions = {
	.get_linkerrors		= transport_get_linkerrors,
	.get_enclosure_identifier	= transport_get_enclosure_identifier,
	.get_bay_identifier	= transport_get_bay_identifier,
	.phy_reset		= transport_phy_reset,
	.smp_handler		= transport_smp_handler,
};

/* transport template shared with the scsi_host registration code */
struct scsi_transport_template *mpt2sas_transport_template;
diff --git a/drivers/scsi/osd/Kbuild b/drivers/scsi/osd/Kbuild
new file mode 100644
index 000000000000..0e207aa67d16
--- /dev/null
+++ b/drivers/scsi/osd/Kbuild
@@ -0,0 +1,45 @@
1#
2# Kbuild for the OSD modules
3#
4# Copyright (C) 2008 Panasas Inc. All rights reserved.
5#
6# Authors:
7# Boaz Harrosh <bharrosh@panasas.com>
8# Benny Halevy <bhalevy@panasas.com>
9#
10# This program is free software; you can redistribute it and/or modify
11# it under the terms of the GNU General Public License version 2
12#
13
14ifneq ($(OSD_INC),)
15# we are built out-of-tree Kconfigure everything as on
16
17CONFIG_SCSI_OSD_INITIATOR=m
18ccflags-y += -DCONFIG_SCSI_OSD_INITIATOR -DCONFIG_SCSI_OSD_INITIATOR_MODULE
19
20CONFIG_SCSI_OSD_ULD=m
21ccflags-y += -DCONFIG_SCSI_OSD_ULD -DCONFIG_SCSI_OSD_ULD_MODULE
22
23# CONFIG_SCSI_OSD_DPRINT_SENSE =
24# 0 - no print of errors
25# 1 - print errors
# 2 - errors + warnings
27ccflags-y += -DCONFIG_SCSI_OSD_DPRINT_SENSE=1
28
29# Uncomment to turn debug on
30# ccflags-y += -DCONFIG_SCSI_OSD_DEBUG
31
32# if we are built out-of-tree and the hosting kernel has OSD headers
# then "ccflags-y +=" will not pick the out-of-tree headers. Only by doing
34# this it will work. This might break in future kernels
35LINUXINCLUDE := -I$(OSD_INC) $(LINUXINCLUDE)
36
37endif
38
39# libosd.ko - osd-initiator library
40libosd-y := osd_initiator.o
41obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o
42
43# osd.ko - SCSI ULD and char-device
44osd-y := osd_uld.o
45obj-$(CONFIG_SCSI_OSD_ULD) += osd.o
diff --git a/drivers/scsi/osd/Kconfig b/drivers/scsi/osd/Kconfig
new file mode 100644
index 000000000000..861b5cebaeae
--- /dev/null
+++ b/drivers/scsi/osd/Kconfig
@@ -0,0 +1,53 @@
1#
2# Kernel configuration file for the OSD scsi protocol
3#
4# Copyright (C) 2008 Panasas Inc. All rights reserved.
5#
6# Authors:
7# Boaz Harrosh <bharrosh@panasas.com>
8# Benny Halevy <bhalevy@panasas.com>
9#
10# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
12# published by the Free Software Foundation
13#
14# FIXME: SCSI_OSD_INITIATOR should select CONFIG (HMAC) SHA1 somehow.
15# How is it done properly?
16#
17
18config SCSI_OSD_INITIATOR
19 tristate "OSD-Initiator library"
20 depends on SCSI
21 help
22 Enable the OSD-Initiator library (libosd.ko).
23 NOTE: You must also select CRYPTO_SHA1 + CRYPTO_HMAC and their
24 dependencies
25
26config SCSI_OSD_ULD
27 tristate "OSD Upper Level driver"
28 depends on SCSI_OSD_INITIATOR
29 help
30 Build a SCSI upper layer driver that exports /dev/osdX devices
31 to user-mode for testing and controlling OSD devices. It is also
32 needed by exofs, for mounting an OSD based file system.
33
34config SCSI_OSD_DPRINT_SENSE
35 int "(0-2) When sense is returned, DEBUG print all sense descriptors"
36 default 1
37 depends on SCSI_OSD_INITIATOR
38 help
39 When a CHECK_CONDITION status is returned from a target, and a
40 sense-buffer is retrieved, turning this on will dump a full
41 sense-decoding message. Setting to 2 will also print recoverable
42 errors that might be regularly returned for some filesystem
43 operations.
44
45config SCSI_OSD_DEBUG
46 bool "Compile All OSD modules with lots of DEBUG prints"
47 default n
48 depends on SCSI_OSD_INITIATOR
49 help
50 OSD Code is populated with lots of OSD_DEBUG(..) printouts to
51 dmesg. Enable this if you found a bug and you want to help us
52 track the problem (see also MAINTAINERS). Setting this will also
53 force SCSI_OSD_DPRINT_SENSE=2.
diff --git a/drivers/scsi/osd/Makefile b/drivers/scsi/osd/Makefile
new file mode 100755
index 000000000000..d905344f83ba
--- /dev/null
+++ b/drivers/scsi/osd/Makefile
@@ -0,0 +1,37 @@
1#
2# Makefile for the OSD modules (out of tree)
3#
4# Copyright (C) 2008 Panasas Inc. All rights reserved.
5#
6# Authors:
7# Boaz Harrosh <bharrosh@panasas.com>
8# Benny Halevy <bhalevy@panasas.com>
9#
10# This program is free software; you can redistribute it and/or modify
11# it under the terms of the GNU General Public License version 2
12#
13# This Makefile is used to call the kernel Makefile in case of an out-of-tree
14# build.
15# $KSRC should point to a Kernel source tree otherwise host's default is
16# used. (eg. /lib/modules/`uname -r`/build)
17
18# include path for out-of-tree Headers
19OSD_INC ?= `pwd`/../../../include
20
21# allow users to override these
22# e.g. to compile for a kernel that you aren't currently running
23KSRC ?= /lib/modules/$(shell uname -r)/build
24KBUILD_OUTPUT ?=
25ARCH ?=
26V ?= 0
27
28# this is the basic Kbuild out-of-tree invocation, with the M= option
29KBUILD_BASE = +$(MAKE) -C $(KSRC) M=`pwd` KBUILD_OUTPUT=$(KBUILD_OUTPUT) ARCH=$(ARCH) V=$(V)
30
31all: libosd
32
33libosd: ;
34 $(KBUILD_BASE) OSD_INC=$(OSD_INC) modules
35
36clean:
37 $(KBUILD_BASE) clean
diff --git a/drivers/scsi/osd/osd_debug.h b/drivers/scsi/osd/osd_debug.h
new file mode 100644
index 000000000000..579e491f11df
--- /dev/null
+++ b/drivers/scsi/osd/osd_debug.h
@@ -0,0 +1,30 @@
1/*
2 * osd_debug.h - Some kprintf macros
3 *
4 * Copyright (C) 2008 Panasas Inc. All rights reserved.
5 *
6 * Authors:
7 * Boaz Harrosh <bharrosh@panasas.com>
8 * Benny Halevy <bhalevy@panasas.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 *
13 */
#ifndef __OSD_DEBUG_H__
#define __OSD_DEBUG_H__

/* Unconditional error/notice printouts, prefixed with "osd: " */
#define OSD_ERR(fmt, a...) printk(KERN_ERR "osd: " fmt, ##a)
#define OSD_INFO(fmt, a...) printk(KERN_NOTICE "osd: " fmt, ##a)

/* Verbose debug printout with call-site info; compiled away unless
 * CONFIG_SCSI_OSD_DEBUG is set */
#ifdef CONFIG_SCSI_OSD_DEBUG
#define OSD_DEBUG(fmt, a...) \
	printk(KERN_NOTICE "osd @%s:%d: " fmt, __func__, __LINE__, ##a)
#else
#define OSD_DEBUG(fmt, a...) do {} while (0)
#endif

/* u64 has problems with printk this will cast it to unsigned long long */
#define _LLU(x) (unsigned long long)(x)

#endif /* ndef __OSD_DEBUG_H__ */
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
new file mode 100644
index 000000000000..552f58b655d1
--- /dev/null
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -0,0 +1,1657 @@
1/*
2 * osd_initiator - Main body of the osd initiator library.
3 *
4 * Note: The file does not contain the advanced security functionality which
5 * is only needed by the security_manager's initiators.
6 *
7 * Copyright (C) 2008 Panasas Inc. All rights reserved.
8 *
9 * Authors:
10 * Boaz Harrosh <bharrosh@panasas.com>
11 * Benny Halevy <bhalevy@panasas.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 *
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 * 3. Neither the name of the Panasas company nor the names of its
26 * contributors may be used to endorse or promote products derived
27 * from this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
30 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
31 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
32 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
37 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
38 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
39 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#include <scsi/osd_initiator.h>
43#include <scsi/osd_sec.h>
44#include <scsi/osd_attributes.h>
45#include <scsi/osd_sense.h>
46
47#include <scsi/scsi_device.h>
48
49#include "osd_debug.h"
50
51#ifndef __unused
52# define __unused __attribute__((unused))
53#endif
54
55enum { OSD_REQ_RETRIES = 1 };
56
57MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
58MODULE_DESCRIPTION("open-osd initiator library libosd.ko");
59MODULE_LICENSE("GPL");
60
/* Compile-time sanity checks: the CDB/capability structures are wire-format
 * constants, so their in-memory sizes must match exactly; BUILD_BUG_ON
 * breaks the build if any structure was not packed.  Never called. */
static inline void build_test(void)
{
	/* structures were not packed */
	BUILD_BUG_ON(sizeof(struct osd_capability) != OSD_CAP_LEN);
	BUILD_BUG_ON(sizeof(struct osdv2_cdb) != OSD_TOTAL_CDB_LEN);
	BUILD_BUG_ON(sizeof(struct osdv1_cdb) != OSDv1_TOTAL_CDB_LEN);
}
68
/* Human-readable protocol version of this request, for log messages. */
static const char *_osd_ver_desc(struct osd_request *or)
{
	if (osd_req_is_ver1(or))
		return "OSD1";
	return "OSD2";
}
73
/* Shorthand for a root-information-page attribute descriptor */
#define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len)

/*
 * Fetch and log the target's root-information attributes (vendor, model,
 * capacity, clock, system id).  Also doubles as the version probe used by
 * osd_auto_detect_ver(): a failure here suggests the wrong OSD version.
 * Returns 0 on success, negative errno otherwise.
 *
 * NOTE: the attribute decode below consumes get_attrs[] strictly in the
 * order the array is declared; the a++ sequence must stay in sync with it.
 */
static int _osd_print_system_info(struct osd_dev *od, void *caps)
{
	struct osd_request *or;
	struct osd_attr get_attrs[] = {
		ATTR_DEF_RI(OSD_ATTR_RI_VENDOR_IDENTIFICATION, 8),
		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_IDENTIFICATION, 16),
		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_MODEL, 32),
		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_REVISION_LEVEL, 4),
		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER, 64 /*variable*/),
		ATTR_DEF_RI(OSD_ATTR_RI_OSD_NAME, 64 /*variable*/),
		ATTR_DEF_RI(OSD_ATTR_RI_TOTAL_CAPACITY, 8),
		ATTR_DEF_RI(OSD_ATTR_RI_USED_CAPACITY, 8),
		ATTR_DEF_RI(OSD_ATTR_RI_NUMBER_OF_PARTITIONS, 8),
		ATTR_DEF_RI(OSD_ATTR_RI_CLOCK, 6),
		/* IBM-OSD-SIM Has a bug with this one put it last */
		ATTR_DEF_RI(OSD_ATTR_RI_OSD_SYSTEM_ID, 20),
	};
	void *iter = NULL, *pFirst;
	int nelem = ARRAY_SIZE(get_attrs), a = 0;
	int ret;

	or = osd_start_request(od, GFP_KERNEL);
	if (!or)
		return -ENOMEM;

	/* get attrs */
	osd_req_get_attributes(or, &osd_root_object);
	osd_req_add_get_attr_list(or, get_attrs, ARRAY_SIZE(get_attrs));

	ret = osd_finalize_request(or, 0, caps, NULL);
	if (ret)
		goto out;

	ret = osd_execute_request(or);
	if (ret) {
		OSD_ERR("Failed to detect %s => %d\n", _osd_ver_desc(or), ret);
		goto out;
	}

	/* nelem is updated to the number of attributes actually returned */
	osd_req_decode_get_attr_list(or, get_attrs, &nelem, &iter);

	OSD_INFO("Detected %s device\n",
		_osd_ver_desc(or));

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("OSD_ATTR_RI_VENDOR_IDENTIFICATION [%s]\n",
		(char *)pFirst);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("OSD_ATTR_RI_PRODUCT_IDENTIFICATION [%s]\n",
		(char *)pFirst);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("OSD_ATTR_RI_PRODUCT_MODEL [%s]\n",
		(char *)pFirst);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("OSD_ATTR_RI_PRODUCT_REVISION_LEVEL [%u]\n",
		pFirst ? get_unaligned_be32(pFirst) : ~0U);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER [%s]\n",
		(char *)pFirst);

	pFirst = get_attrs[a].val_ptr;
	OSD_INFO("OSD_ATTR_RI_OSD_NAME [%s]\n", (char *)pFirst);
	a++;

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("OSD_ATTR_RI_TOTAL_CAPACITY [0x%llx]\n",
		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("OSD_ATTR_RI_USED_CAPACITY [0x%llx]\n",
		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("OSD_ATTR_RI_NUMBER_OF_PARTITIONS [%llu]\n",
		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);

	/* the target may have returned fewer attributes than requested */
	if (a >= nelem)
		goto out;

	/* FIXME: Where are the time utilities */
	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("OSD_ATTR_RI_CLOCK [0x%02x%02x%02x%02x%02x%02x]\n",
		((char *)pFirst)[0], ((char *)pFirst)[1],
		((char *)pFirst)[2], ((char *)pFirst)[3],
		((char *)pFirst)[4], ((char *)pFirst)[5]);

	if (a < nelem) { /* IBM-OSD-SIM bug, Might not have it */
		unsigned len = get_attrs[a].len;
		char sid_dump[32*4 + 2]; /* 2nibbles+space+ASCII */

		hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
				   sid_dump, sizeof(sid_dump), true);
		OSD_INFO("OSD_ATTR_RI_OSD_SYSTEM_ID(%d) [%s]\n", len, sid_dump);
		a++;
	}
out:
	osd_end_request(or);
	return ret;
}
179
180int osd_auto_detect_ver(struct osd_dev *od, void *caps)
181{
182 int ret;
183
184 /* Auto-detect the osd version */
185 ret = _osd_print_system_info(od, caps);
186 if (ret) {
187 osd_dev_set_ver(od, OSD_VER1);
188 OSD_DEBUG("converting to OSD1\n");
189 ret = _osd_print_system_info(od, caps);
190 }
191
192 return ret;
193}
194EXPORT_SYMBOL(osd_auto_detect_ver);
195
196static unsigned _osd_req_cdb_len(struct osd_request *or)
197{
198 return osd_req_is_ver1(or) ? OSDv1_TOTAL_CDB_LEN : OSD_TOTAL_CDB_LEN;
199}
200
/* Encoded size of one attribute-list element holding @len value bytes,
 * per this request's protocol version. */
static unsigned _osd_req_alist_elem_size(struct osd_request *or, unsigned len)
{
	if (osd_req_is_ver1(or))
		return osdv1_attr_list_elem_size(len);
	return osdv2_attr_list_elem_size(len);
}
207
/* Total encoded size of the attribute list starting at @list_head,
 * per this request's protocol version. */
static unsigned _osd_req_alist_size(struct osd_request *or, void *list_head)
{
	if (osd_req_is_ver1(or))
		return osdv1_list_size(list_head);
	return osdv2_list_size(list_head);
}
214
215static unsigned _osd_req_sizeof_alist_header(struct osd_request *or)
216{
217 return osd_req_is_ver1(or) ?
218 sizeof(struct osdv1_attributes_list_header) :
219 sizeof(struct osdv2_attributes_list_header);
220}
221
222static void _osd_req_set_alist_type(struct osd_request *or,
223 void *list, int list_type)
224{
225 if (osd_req_is_ver1(or)) {
226 struct osdv1_attributes_list_header *attr_list = list;
227
228 memset(attr_list, 0, sizeof(*attr_list));
229 attr_list->type = list_type;
230 } else {
231 struct osdv2_attributes_list_header *attr_list = list;
232
233 memset(attr_list, 0, sizeof(*attr_list));
234 attr_list->type = list_type;
235 }
236}
237
238static bool _osd_req_is_alist_type(struct osd_request *or,
239 void *list, int list_type)
240{
241 if (!list)
242 return false;
243
244 if (osd_req_is_ver1(or)) {
245 struct osdv1_attributes_list_header *attr_list = list;
246
247 return attr_list->type == list_type;
248 } else {
249 struct osdv2_attributes_list_header *attr_list = list;
250
251 return attr_list->type == list_type;
252 }
253}
254
255/* This is for List-objects not Attributes-Lists */
256static void _osd_req_encode_olist(struct osd_request *or,
257 struct osd_obj_id_list *list)
258{
259 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
260
261 if (osd_req_is_ver1(or)) {
262 cdbh->v1.list_identifier = list->list_identifier;
263 cdbh->v1.start_address = list->continuation_id;
264 } else {
265 cdbh->v2.list_identifier = list->list_identifier;
266 cdbh->v2.start_address = list->continuation_id;
267 }
268}
269
270static osd_cdb_offset osd_req_encode_offset(struct osd_request *or,
271 u64 offset, unsigned *padding)
272{
273 return __osd_encode_offset(offset, padding,
274 osd_req_is_ver1(or) ?
275 OSDv1_OFFSET_MIN_SHIFT : OSD_OFFSET_MIN_SHIFT,
276 OSD_OFFSET_MAX_SHIFT);
277}
278
279static struct osd_security_parameters *
280_osd_req_sec_params(struct osd_request *or)
281{
282 struct osd_cdb *ocdb = &or->cdb;
283
284 if (osd_req_is_ver1(or))
285 return &ocdb->v1.sec_params;
286 else
287 return &ocdb->v2.sec_params;
288}
289
290void osd_dev_init(struct osd_dev *osdd, struct scsi_device *scsi_device)
291{
292 memset(osdd, 0, sizeof(*osdd));
293 osdd->scsi_device = scsi_device;
294 osdd->def_timeout = BLK_DEFAULT_SG_TIMEOUT;
295#ifdef OSD_VER1_SUPPORT
296 osdd->version = OSD_VER2;
297#endif
298 /* TODO: Allocate pools for osd_request attributes ... */
299}
300EXPORT_SYMBOL(osd_dev_init);
301
302void osd_dev_fini(struct osd_dev *osdd)
303{
304 /* TODO: De-allocate pools */
305
306 osdd->scsi_device = NULL;
307}
308EXPORT_SYMBOL(osd_dev_fini);
309
310static struct osd_request *_osd_request_alloc(gfp_t gfp)
311{
312 struct osd_request *or;
313
314 /* TODO: Use mempool with one saved request */
315 or = kzalloc(sizeof(*or), gfp);
316 return or;
317}
318
/* Counterpart of _osd_request_alloc(). */
static void _osd_request_free(struct osd_request *or)
{
	kfree(or);
}
323
324struct osd_request *osd_start_request(struct osd_dev *dev, gfp_t gfp)
325{
326 struct osd_request *or;
327
328 or = _osd_request_alloc(gfp);
329 if (!or)
330 return NULL;
331
332 or->osd_dev = dev;
333 or->alloc_flags = gfp;
334 or->timeout = dev->def_timeout;
335 or->retries = OSD_REQ_RETRIES;
336
337 return or;
338}
339EXPORT_SYMBOL(osd_start_request);
340
341/*
342 * If osd_finalize_request() was called but the request was not executed through
343 * the block layer, then we must release BIOs.
344 */
345static void _abort_unexecuted_bios(struct request *rq)
346{
347 struct bio *bio;
348
349 while ((bio = rq->bio) != NULL) {
350 rq->bio = bio->bi_next;
351 bio_endio(bio, 0);
352 }
353}
354
355static void _osd_free_seg(struct osd_request *or __unused,
356 struct _osd_req_data_segment *seg)
357{
358 if (!seg->buff || !seg->alloc_size)
359 return;
360
361 kfree(seg->buff);
362 seg->buff = NULL;
363 seg->alloc_size = 0;
364}
365
366void osd_end_request(struct osd_request *or)
367{
368 struct request *rq = or->request;
369
370 _osd_free_seg(or, &or->set_attr);
371 _osd_free_seg(or, &or->enc_get_attr);
372 _osd_free_seg(or, &or->get_attr);
373
374 if (rq) {
375 if (rq->next_rq) {
376 _abort_unexecuted_bios(rq->next_rq);
377 blk_put_request(rq->next_rq);
378 }
379
380 _abort_unexecuted_bios(rq);
381 blk_put_request(rq);
382 }
383 _osd_request_free(or);
384}
385EXPORT_SYMBOL(osd_end_request);
386
387int osd_execute_request(struct osd_request *or)
388{
389 return blk_execute_rq(or->request->q, NULL, or->request, 0);
390}
391EXPORT_SYMBOL(osd_execute_request);
392
393static void osd_request_async_done(struct request *req, int error)
394{
395 struct osd_request *or = req->end_io_data;
396
397 or->async_error = error;
398
399 if (error)
400 OSD_DEBUG("osd_request_async_done error recieved %d\n", error);
401
402 if (or->async_done)
403 or->async_done(or, or->async_private);
404 else
405 osd_end_request(or);
406}
407
408int osd_execute_request_async(struct osd_request *or,
409 osd_req_done_fn *done, void *private)
410{
411 or->request->end_io_data = or;
412 or->async_private = private;
413 or->async_done = done;
414
415 blk_execute_rq_nowait(or->request->q, NULL, or->request, 0,
416 osd_request_async_done);
417 return 0;
418}
419EXPORT_SYMBOL(osd_execute_request_async);
420
/* Scratch buffers used as padding sources when chaining data segments up to
 * the offset alignment produced by osd_req_encode_offset(); contents are
 * never read back. */
u8 sg_out_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
u8 sg_in_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
423
424static int _osd_realloc_seg(struct osd_request *or,
425 struct _osd_req_data_segment *seg, unsigned max_bytes)
426{
427 void *buff;
428
429 if (seg->alloc_size >= max_bytes)
430 return 0;
431
432 buff = krealloc(seg->buff, max_bytes, or->alloc_flags);
433 if (!buff) {
434 OSD_ERR("Failed to Realloc %d-bytes was-%d\n", max_bytes,
435 seg->alloc_size);
436 return -ENOMEM;
437 }
438
439 memset(buff + seg->alloc_size, 0, max_bytes - seg->alloc_size);
440 seg->buff = buff;
441 seg->alloc_size = max_bytes;
442 return 0;
443}
444
445static int _alloc_set_attr_list(struct osd_request *or,
446 const struct osd_attr *oa, unsigned nelem, unsigned add_bytes)
447{
448 unsigned total_bytes = add_bytes;
449
450 for (; nelem; --nelem, ++oa)
451 total_bytes += _osd_req_alist_elem_size(or, oa->len);
452
453 OSD_DEBUG("total_bytes=%d\n", total_bytes);
454 return _osd_realloc_seg(or, &or->set_attr, total_bytes);
455}
456
457static int _alloc_get_attr_desc(struct osd_request *or, unsigned max_bytes)
458{
459 OSD_DEBUG("total_bytes=%d\n", max_bytes);
460 return _osd_realloc_seg(or, &or->enc_get_attr, max_bytes);
461}
462
463static int _alloc_get_attr_list(struct osd_request *or)
464{
465 OSD_DEBUG("total_bytes=%d\n", or->get_attr.total_bytes);
466 return _osd_realloc_seg(or, &or->get_attr, or->get_attr.total_bytes);
467}
468
469/*
470 * Common to all OSD commands
471 */
472
473static void _osdv1_req_encode_common(struct osd_request *or,
474 __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
475{
476 struct osdv1_cdb *ocdb = &or->cdb.v1;
477
478 /*
479 * For speed, the commands
480 * OSD_ACT_PERFORM_SCSI_COMMAND , V1 0x8F7E, V2 0x8F7C
481 * OSD_ACT_SCSI_TASK_MANAGEMENT , V1 0x8F7F, V2 0x8F7D
482 * are not supported here. Should pass zero and set after the call
483 */
484 act &= cpu_to_be16(~0x0080); /* V1 action code */
485
486 OSD_DEBUG("OSDv1 execute opcode 0x%x\n", be16_to_cpu(act));
487
488 ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
489 ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
490 ocdb->h.varlen_cdb.service_action = act;
491
492 ocdb->h.partition = cpu_to_be64(obj->partition);
493 ocdb->h.object = cpu_to_be64(obj->id);
494 ocdb->h.v1.length = cpu_to_be64(len);
495 ocdb->h.v1.start_address = cpu_to_be64(offset);
496}
497
498static void _osdv2_req_encode_common(struct osd_request *or,
499 __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
500{
501 struct osdv2_cdb *ocdb = &or->cdb.v2;
502
503 OSD_DEBUG("OSDv2 execute opcode 0x%x\n", be16_to_cpu(act));
504
505 ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
506 ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
507 ocdb->h.varlen_cdb.service_action = act;
508
509 ocdb->h.partition = cpu_to_be64(obj->partition);
510 ocdb->h.object = cpu_to_be64(obj->id);
511 ocdb->h.v2.length = cpu_to_be64(len);
512 ocdb->h.v2.start_address = cpu_to_be64(offset);
513}
514
515static void _osd_req_encode_common(struct osd_request *or,
516 __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
517{
518 if (osd_req_is_ver1(or))
519 _osdv1_req_encode_common(or, act, obj, offset, len);
520 else
521 _osdv2_req_encode_common(or, act, obj, offset, len);
522}
523
524/*
525 * Device commands
526 */
527/*TODO: void osd_req_set_master_seed_xchg(struct osd_request *, ...); */
528/*TODO: void osd_req_set_master_key(struct osd_request *, ...); */
529
530void osd_req_format(struct osd_request *or, u64 tot_capacity)
531{
532 _osd_req_encode_common(or, OSD_ACT_FORMAT_OSD, &osd_root_object, 0,
533 tot_capacity);
534}
535EXPORT_SYMBOL(osd_req_format);
536
537int osd_req_list_dev_partitions(struct osd_request *or,
538 osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem)
539{
540 return osd_req_list_partition_objects(or, 0, initial_id, list, nelem);
541}
542EXPORT_SYMBOL(osd_req_list_dev_partitions);
543
544static void _osd_req_encode_flush(struct osd_request *or,
545 enum osd_options_flush_scope_values op)
546{
547 struct osd_cdb_head *ocdb = osd_cdb_head(&or->cdb);
548
549 ocdb->command_specific_options = op;
550}
551
552void osd_req_flush_obsd(struct osd_request *or,
553 enum osd_options_flush_scope_values op)
554{
555 _osd_req_encode_common(or, OSD_ACT_FLUSH_OSD, &osd_root_object, 0, 0);
556 _osd_req_encode_flush(or, op);
557}
558EXPORT_SYMBOL(osd_req_flush_obsd);
559
560/*TODO: void osd_req_perform_scsi_command(struct osd_request *,
561 const u8 *cdb, ...); */
562/*TODO: void osd_req_task_management(struct osd_request *, ...); */
563
564/*
565 * Partition commands
566 */
567static void _osd_req_encode_partition(struct osd_request *or,
568 __be16 act, osd_id partition)
569{
570 struct osd_obj_id par = {
571 .partition = partition,
572 .id = 0,
573 };
574
575 _osd_req_encode_common(or, act, &par, 0, 0);
576}
577
578void osd_req_create_partition(struct osd_request *or, osd_id partition)
579{
580 _osd_req_encode_partition(or, OSD_ACT_CREATE_PARTITION, partition);
581}
582EXPORT_SYMBOL(osd_req_create_partition);
583
584void osd_req_remove_partition(struct osd_request *or, osd_id partition)
585{
586 _osd_req_encode_partition(or, OSD_ACT_REMOVE_PARTITION, partition);
587}
588EXPORT_SYMBOL(osd_req_remove_partition);
589
590/*TODO: void osd_req_set_partition_key(struct osd_request *,
591 osd_id partition, u8 new_key_id[OSD_CRYPTO_KEYID_SIZE],
592 u8 seed[OSD_CRYPTO_SEED_SIZE]); */
593
594static int _osd_req_list_objects(struct osd_request *or,
595 __be16 action, const struct osd_obj_id *obj, osd_id initial_id,
596 struct osd_obj_id_list *list, unsigned nelem)
597{
598 struct request_queue *q = or->osd_dev->scsi_device->request_queue;
599 u64 len = nelem * sizeof(osd_id) + sizeof(*list);
600 struct bio *bio;
601
602 _osd_req_encode_common(or, action, obj, (u64)initial_id, len);
603
604 if (list->list_identifier)
605 _osd_req_encode_olist(or, list);
606
607 WARN_ON(or->in.bio);
608 bio = bio_map_kern(q, list, len, or->alloc_flags);
609 if (!bio) {
610 OSD_ERR("!!! Failed to allocate list_objects BIO\n");
611 return -ENOMEM;
612 }
613
614 bio->bi_rw &= ~(1 << BIO_RW);
615 or->in.bio = bio;
616 or->in.total_bytes = bio->bi_size;
617 return 0;
618}
619
620int osd_req_list_partition_collections(struct osd_request *or,
621 osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
622 unsigned nelem)
623{
624 struct osd_obj_id par = {
625 .partition = partition,
626 .id = 0,
627 };
628
629 return osd_req_list_collection_objects(or, &par, initial_id, list,
630 nelem);
631}
632EXPORT_SYMBOL(osd_req_list_partition_collections);
633
634int osd_req_list_partition_objects(struct osd_request *or,
635 osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
636 unsigned nelem)
637{
638 struct osd_obj_id par = {
639 .partition = partition,
640 .id = 0,
641 };
642
643 return _osd_req_list_objects(or, OSD_ACT_LIST, &par, initial_id, list,
644 nelem);
645}
646EXPORT_SYMBOL(osd_req_list_partition_objects);
647
648void osd_req_flush_partition(struct osd_request *or,
649 osd_id partition, enum osd_options_flush_scope_values op)
650{
651 _osd_req_encode_partition(or, OSD_ACT_FLUSH_PARTITION, partition);
652 _osd_req_encode_flush(or, op);
653}
654EXPORT_SYMBOL(osd_req_flush_partition);
655
656/*
657 * Collection commands
658 */
659/*TODO: void osd_req_create_collection(struct osd_request *,
660 const struct osd_obj_id *); */
661/*TODO: void osd_req_remove_collection(struct osd_request *,
662 const struct osd_obj_id *); */
663
664int osd_req_list_collection_objects(struct osd_request *or,
665 const struct osd_obj_id *obj, osd_id initial_id,
666 struct osd_obj_id_list *list, unsigned nelem)
667{
668 return _osd_req_list_objects(or, OSD_ACT_LIST_COLLECTION, obj,
669 initial_id, list, nelem);
670}
671EXPORT_SYMBOL(osd_req_list_collection_objects);
672
673/*TODO: void query(struct osd_request *, ...); V2 */
674
675void osd_req_flush_collection(struct osd_request *or,
676 const struct osd_obj_id *obj, enum osd_options_flush_scope_values op)
677{
678 _osd_req_encode_common(or, OSD_ACT_FLUSH_PARTITION, obj, 0, 0);
679 _osd_req_encode_flush(or, op);
680}
681EXPORT_SYMBOL(osd_req_flush_collection);
682
683/*TODO: void get_member_attrs(struct osd_request *, ...); V2 */
684/*TODO: void set_member_attrs(struct osd_request *, ...); V2 */
685
686/*
687 * Object commands
688 */
689void osd_req_create_object(struct osd_request *or, struct osd_obj_id *obj)
690{
691 _osd_req_encode_common(or, OSD_ACT_CREATE, obj, 0, 0);
692}
693EXPORT_SYMBOL(osd_req_create_object);
694
695void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *obj)
696{
697 _osd_req_encode_common(or, OSD_ACT_REMOVE, obj, 0, 0);
698}
699EXPORT_SYMBOL(osd_req_remove_object);
700
701
702/*TODO: void osd_req_create_multi(struct osd_request *or,
703 struct osd_obj_id *first, struct osd_obj_id_list *list, unsigned nelem);
704*/
705
706void osd_req_write(struct osd_request *or,
707 const struct osd_obj_id *obj, struct bio *bio, u64 offset)
708{
709 _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, bio->bi_size);
710 WARN_ON(or->out.bio || or->out.total_bytes);
711 bio->bi_rw |= (1 << BIO_RW);
712 or->out.bio = bio;
713 or->out.total_bytes = bio->bi_size;
714}
715EXPORT_SYMBOL(osd_req_write);
716
717/*TODO: void osd_req_append(struct osd_request *,
718 const struct osd_obj_id *, struct bio *data_out); */
719/*TODO: void osd_req_create_write(struct osd_request *,
720 const struct osd_obj_id *, struct bio *data_out, u64 offset); */
721/*TODO: void osd_req_clear(struct osd_request *,
722 const struct osd_obj_id *, u64 offset, u64 len); */
723/*TODO: void osd_req_punch(struct osd_request *,
724 const struct osd_obj_id *, u64 offset, u64 len); V2 */
725
726void osd_req_flush_object(struct osd_request *or,
727 const struct osd_obj_id *obj, enum osd_options_flush_scope_values op,
728 /*V2*/ u64 offset, /*V2*/ u64 len)
729{
730 if (unlikely(osd_req_is_ver1(or) && (offset || len))) {
731 OSD_DEBUG("OSD Ver1 flush on specific range ignored\n");
732 offset = 0;
733 len = 0;
734 }
735
736 _osd_req_encode_common(or, OSD_ACT_FLUSH, obj, offset, len);
737 _osd_req_encode_flush(or, op);
738}
739EXPORT_SYMBOL(osd_req_flush_object);
740
741void osd_req_read(struct osd_request *or,
742 const struct osd_obj_id *obj, struct bio *bio, u64 offset)
743{
744 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, bio->bi_size);
745 WARN_ON(or->in.bio || or->in.total_bytes);
746 bio->bi_rw &= ~(1 << BIO_RW);
747 or->in.bio = bio;
748 or->in.total_bytes = bio->bi_size;
749}
750EXPORT_SYMBOL(osd_req_read);
751
752void osd_req_get_attributes(struct osd_request *or,
753 const struct osd_obj_id *obj)
754{
755 _osd_req_encode_common(or, OSD_ACT_GET_ATTRIBUTES, obj, 0, 0);
756}
757EXPORT_SYMBOL(osd_req_get_attributes);
758
759void osd_req_set_attributes(struct osd_request *or,
760 const struct osd_obj_id *obj)
761{
762 _osd_req_encode_common(or, OSD_ACT_SET_ATTRIBUTES, obj, 0, 0);
763}
764EXPORT_SYMBOL(osd_req_set_attributes);
765
766/*
767 * Attributes List-mode
768 */
769
/*
 * Append @nelem attributes from @oa to the request's set-attr list (list
 * attribute mode), growing or->set_attr as needed. May be called multiple
 * times; the first call writes the list header. Returns 0 or -ENOMEM /
 * -EINVAL (when page mode was already selected on this request).
 */
int osd_req_add_set_attr_list(struct osd_request *or,
	const struct osd_attr *oa, unsigned nelem)
{
	unsigned total_bytes = or->set_attr.total_bytes;
	void *attr_last;
	int ret;

	/* list mode and page mode are mutually exclusive per request */
	if (or->attributes_mode &&
	    or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
		WARN_ON(1);
		return -EINVAL;
	}
	or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;

	if (!total_bytes) { /* first-time: allocate and put list header */
		total_bytes = _osd_req_sizeof_alist_header(or);
		ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
		if (ret)
			return ret;
		_osd_req_set_alist_type(or, or->set_attr.buff,
					OSD_ATTR_LIST_SET_RETRIEVE);
	}
	attr_last = or->set_attr.buff + total_bytes;

	for (; nelem; --nelem) {
		struct osd_attributes_list_element *attr;
		unsigned elem_size = _osd_req_alist_elem_size(or, oa->len);

		total_bytes += elem_size;
		if (unlikely(or->set_attr.alloc_size < total_bytes)) {
			/* grow for the remaining elements; krealloc may move
			 * the buffer, so recompute attr_last afterwards */
			or->set_attr.total_bytes = total_bytes - elem_size;
			ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
			if (ret)
				return ret;
			attr_last =
				or->set_attr.buff + or->set_attr.total_bytes;
		}

		/* encode one element: page/id/len header plus value bytes */
		attr = attr_last;
		attr->attr_page = cpu_to_be32(oa->attr_page);
		attr->attr_id = cpu_to_be32(oa->attr_id);
		attr->attr_bytes = cpu_to_be16(oa->len);
		memcpy(attr->attr_val, oa->val_ptr, oa->len);

		attr_last += elem_size;
		++oa;
	}

	or->set_attr.total_bytes = total_bytes;
	return 0;
}
EXPORT_SYMBOL(osd_req_add_set_attr_list);
822
823static int _append_map_kern(struct request *req,
824 void *buff, unsigned len, gfp_t flags)
825{
826 struct bio *bio;
827 int ret;
828
829 bio = bio_map_kern(req->q, buff, len, flags);
830 if (IS_ERR(bio)) {
831 OSD_ERR("Failed bio_map_kern(%p, %d) => %ld\n", buff, len,
832 PTR_ERR(bio));
833 return PTR_ERR(bio);
834 }
835 ret = blk_rq_append_bio(req->q, req, bio);
836 if (ret) {
837 OSD_ERR("Failed blk_rq_append_bio(%p) => %d\n", bio, ret);
838 bio_put(bio);
839 }
840 return ret;
841}
842
/*
 * Append data segment @seg to the block request of @io, preceded by
 * @padding alignment bytes when needed. The pad bytes are taken from the
 * tail slack of @last_seg when it has room (keeps the pad contiguous with
 * the previous buffer), otherwise from io->pad_buff scratch space.
 * Updates io->total_bytes on success.
 */
static int _req_append_segment(struct osd_request *or,
	unsigned padding, struct _osd_req_data_segment *seg,
	struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
{
	void *pad_buff;
	int ret;

	if (padding) {
		/* check if we can just add it to last buffer */
		if (last_seg &&
		    (padding <= last_seg->alloc_size - last_seg->total_bytes))
			pad_buff = last_seg->buff + last_seg->total_bytes;
		else
			pad_buff = io->pad_buff;

		ret = _append_map_kern(io->req, pad_buff, padding,
				       or->alloc_flags);
		if (ret)
			return ret;
		io->total_bytes += padding;
	}

	ret = _append_map_kern(io->req, seg->buff, seg->total_bytes,
			       or->alloc_flags);
	if (ret)
		return ret;

	io->total_bytes += seg->total_bytes;
	OSD_DEBUG("padding=%d buff=%p total_bytes=%d\n", padding, seg->buff,
		  seg->total_bytes);
	return 0;
}
875
/*
 * Encode the accumulated set-attr list into the CDB and chain its buffer
 * (plus any alignment padding) onto the data-out stream. When no set
 * attributes were added, marks the CDB slot unused.
 */
static int _osd_req_finalize_set_attr_list(struct osd_request *or)
{
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
	unsigned padding;
	int ret;

	if (!or->set_attr.total_bytes) {
		cdbh->attrs_list.set_attr_offset = OSD_OFFSET_UNUSED;
		return 0;
	}

	cdbh->attrs_list.set_attr_bytes = cpu_to_be32(or->set_attr.total_bytes);
	cdbh->attrs_list.set_attr_offset =
		osd_req_encode_offset(or, or->out.total_bytes, &padding);

	ret = _req_append_segment(or, padding, &or->set_attr,
				  or->out.last_seg, &or->out);
	if (ret)
		return ret;

	/* remember the tail segment so later padding can reuse its slack */
	or->out.last_seg = &or->set_attr;
	return 0;
}
899
/*
 * Queue @nelem attribute ids from @oa for retrieval (list attribute mode).
 * Builds the data-out descriptor list in or->enc_get_attr and accounts the
 * expected data-in reply size in or->get_attr.total_bytes (allocated later,
 * at finalize time). May be called multiple times. Returns 0 or -ENOMEM /
 * -EINVAL (when page mode was already selected on this request).
 */
int osd_req_add_get_attr_list(struct osd_request *or,
	const struct osd_attr *oa, unsigned nelem)
{
	unsigned total_bytes = or->enc_get_attr.total_bytes;
	void *attr_last;
	int ret;

	/* list mode and page mode are mutually exclusive per request */
	if (or->attributes_mode &&
	    or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
		WARN_ON(1);
		return -EINVAL;
	}
	or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;

	/* first time calc data-in list header size */
	if (!or->get_attr.total_bytes)
		or->get_attr.total_bytes = _osd_req_sizeof_alist_header(or);

	/* calc data-out info */
	if (!total_bytes) { /* first-time: allocate and put list header */
		unsigned max_bytes;

		total_bytes = _osd_req_sizeof_alist_header(or);
		max_bytes = total_bytes +
			nelem * sizeof(struct osd_attributes_list_attrid);
		ret = _alloc_get_attr_desc(or, max_bytes);
		if (ret)
			return ret;

		_osd_req_set_alist_type(or, or->enc_get_attr.buff,
					OSD_ATTR_LIST_GET);
	}
	attr_last = or->enc_get_attr.buff + total_bytes;

	for (; nelem; --nelem) {
		struct osd_attributes_list_attrid *attrid;
		const unsigned cur_size = sizeof(*attrid);

		total_bytes += cur_size;
		if (unlikely(or->enc_get_attr.alloc_size < total_bytes)) {
			/* grow for the remaining elements; krealloc may move
			 * the buffer, so recompute attr_last afterwards */
			or->enc_get_attr.total_bytes = total_bytes - cur_size;
			ret = _alloc_get_attr_desc(or,
					total_bytes + nelem * sizeof(*attrid));
			if (ret)
				return ret;
			attr_last = or->enc_get_attr.buff +
				or->enc_get_attr.total_bytes;
		}

		attrid = attr_last;
		attrid->attr_page = cpu_to_be32(oa->attr_page);
		attrid->attr_id = cpu_to_be32(oa->attr_id);

		attr_last += cur_size;

		/* calc data-in size */
		or->get_attr.total_bytes +=
			_osd_req_alist_elem_size(or, oa->len);
		++oa;
	}

	or->enc_get_attr.total_bytes = total_bytes;

	OSD_DEBUG(
	       "get_attr.total_bytes=%u(%u) enc_get_attr.total_bytes=%u(%Zu)\n",
	       or->get_attr.total_bytes,
	       or->get_attr.total_bytes - _osd_req_sizeof_alist_header(or),
	       or->enc_get_attr.total_bytes,
	       (or->enc_get_attr.total_bytes - _osd_req_sizeof_alist_header(or))
			/ sizeof(struct osd_attributes_list_attrid));

	return 0;
}
EXPORT_SYMBOL(osd_req_add_get_attr_list);
974
/*
 * Finalize list-mode attribute retrieval: allocate the data-in reply buffer,
 * encode both the descriptor (data-out) and reply (data-in) offsets into the
 * CDB, and chain each buffer onto its respective stream. No-op (slots marked
 * unused) when no get attributes were queued.
 */
static int _osd_req_finalize_get_attr_list(struct osd_request *or)
{
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
	unsigned out_padding;
	unsigned in_padding;
	int ret;

	if (!or->enc_get_attr.total_bytes) {
		cdbh->attrs_list.get_attr_desc_offset = OSD_OFFSET_UNUSED;
		cdbh->attrs_list.get_attr_offset = OSD_OFFSET_UNUSED;
		return 0;
	}

	/* allocate or->get_attr to the size accumulated while adding ids */
	ret = _alloc_get_attr_list(or);
	if (ret)
		return ret;

	/* The out-going buffer info update */
	OSD_DEBUG("out-going\n");
	cdbh->attrs_list.get_attr_desc_bytes =
		cpu_to_be32(or->enc_get_attr.total_bytes);

	cdbh->attrs_list.get_attr_desc_offset =
		osd_req_encode_offset(or, or->out.total_bytes, &out_padding);

	ret = _req_append_segment(or, out_padding, &or->enc_get_attr,
				  or->out.last_seg, &or->out);
	if (ret)
		return ret;
	or->out.last_seg = &or->enc_get_attr;

	/* The incoming buffer info update */
	OSD_DEBUG("in-coming\n");
	cdbh->attrs_list.get_attr_alloc_length =
		cpu_to_be32(or->get_attr.total_bytes);

	cdbh->attrs_list.get_attr_offset =
		osd_req_encode_offset(or, or->in.total_bytes, &in_padding);

	ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
				  &or->in);
	if (ret)
		return ret;
	or->in.last_seg = &or->get_attr;

	return 0;
}
1022
/*
 * Decode up to *nelem attributes from the target's get-attr reply buffer
 * into @oa. *iterator is an opaque resume cursor: pass NULL-initialized on
 * the first call (which also validates the list header and clamps the
 * reported length to the allocated buffer), then call again with the same
 * iterator until it comes back NULL. On return *nelem holds the number of
 * attributes decoded and the return value is the count of bytes still
 * undecoded (0 when done). Returned oa->val_ptr entries point into the
 * request's own buffer — valid only until osd_end_request().
 */
int osd_req_decode_get_attr_list(struct osd_request *or,
	struct osd_attr *oa, int *nelem, void **iterator)
{
	unsigned cur_bytes, returned_bytes;
	int n;
	const unsigned sizeof_attr_list = _osd_req_sizeof_alist_header(or);
	void *cur_p;

	if (!_osd_req_is_alist_type(or, or->get_attr.buff,
				    OSD_ATTR_LIST_SET_RETRIEVE)) {
		/* no valid reply list; report an empty result */
		oa->attr_page = 0;
		oa->attr_id = 0;
		oa->val_ptr = NULL;
		oa->len = 0;
		*iterator = NULL;
		return 0;
	}

	if (*iterator) {
		/* resuming: iterator must still point inside our buffer */
		BUG_ON((*iterator < or->get_attr.buff) ||
		       (or->get_attr.buff + or->get_attr.alloc_size < *iterator));
		cur_p = *iterator;
		cur_bytes = (*iterator - or->get_attr.buff) - sizeof_attr_list;
		returned_bytes = or->get_attr.total_bytes;
	} else { /* first time decode the list header */
		cur_bytes = sizeof_attr_list;
		returned_bytes = _osd_req_alist_size(or, or->get_attr.buff) +
					sizeof_attr_list;

		cur_p = or->get_attr.buff + sizeof_attr_list;

		if (returned_bytes > or->get_attr.alloc_size) {
			/* target reported more than we allocated; clamp and
			 * decode only what actually fit */
			OSD_DEBUG("target report: space was not big enough! "
				  "Allocate=%u Needed=%u\n",
				  or->get_attr.alloc_size,
				  returned_bytes + sizeof_attr_list);

			returned_bytes =
				or->get_attr.alloc_size - sizeof_attr_list;
		}
		or->get_attr.total_bytes = returned_bytes;
	}

	for (n = 0; (n < *nelem) && (cur_bytes < returned_bytes); ++n) {
		struct osd_attributes_list_element *attr = cur_p;
		unsigned inc;

		oa->len = be16_to_cpu(attr->attr_bytes);
		inc = _osd_req_alist_elem_size(or, oa->len);
		OSD_DEBUG("oa->len=%d inc=%d cur_bytes=%d\n",
			  oa->len, inc, cur_bytes);
		cur_bytes += inc;
		if (cur_bytes > returned_bytes) {
			/* element overruns the reported length: corrupt list */
			OSD_ERR("BAD FOOD from target. list not valid!"
				"c=%d r=%d n=%d\n",
				cur_bytes, returned_bytes, n);
			oa->val_ptr = NULL;
			break;
		}

		oa->attr_page = be32_to_cpu(attr->attr_page);
		oa->attr_id = be32_to_cpu(attr->attr_id);
		oa->val_ptr = attr->attr_val;

		cur_p += inc;
		++oa;
	}

	*iterator = (returned_bytes - cur_bytes) ? cur_p : NULL;
	*nelem = n;
	return returned_bytes - cur_bytes;
}
EXPORT_SYMBOL(osd_req_decode_get_attr_list);
1096
1097/*
1098 * Attributes Page-mode
1099 */
1100
/*
 * Select page attribute mode: retrieve one whole attribute page (@page_id,
 * up to @max_page_len bytes into @attar_page — name is presumably a typo
 * for "attr_page", kept for API stability) and set the single attribute
 * described by @set_one_attr. The buffers are only referenced here; they
 * are chained to the request at finalize time. Returns 0 or -EINVAL when
 * list mode was already selected on this request.
 */
int osd_req_add_get_attr_page(struct osd_request *or,
	u32 page_id, void *attar_page, unsigned max_page_len,
	const struct osd_attr *set_one_attr)
{
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);

	/* page mode and list mode are mutually exclusive per request */
	if (or->attributes_mode &&
	    or->attributes_mode != OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
		WARN_ON(1);
		return -EINVAL;
	}
	or->attributes_mode = OSD_CDB_GET_ATTR_PAGE_SET_ONE;

	/* data-in: the caller's page buffer receives the attribute page */
	or->get_attr.buff = attar_page;
	or->get_attr.total_bytes = max_page_len;

	/* data-out: the single attribute value to set */
	or->set_attr.buff = set_one_attr->val_ptr;
	or->set_attr.total_bytes = set_one_attr->len;

	cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id);
	cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len);
	/* ocdb->attrs_page.get_attr_offset; */

	cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page);
	cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id);
	cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len);
	/* ocdb->attrs_page.set_attr_offset; */
	return 0;
}
EXPORT_SYMBOL(osd_req_add_get_attr_page);
1131
1132static int _osd_req_finalize_attr_page(struct osd_request *or)
1133{
1134 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1135 unsigned in_padding, out_padding;
1136 int ret;
1137
1138 /* returned page */
1139 cdbh->attrs_page.get_attr_offset =
1140 osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
1141
1142 ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
1143 &or->in);
1144 if (ret)
1145 return ret;
1146
1147 /* set one value */
1148 cdbh->attrs_page.set_attr_offset =
1149 osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
1150
1151 ret = _req_append_segment(or, out_padding, &or->enc_get_attr, NULL,
1152 &or->out);
1153 return ret;
1154}
1155
/*
 * When the security method requires ALLDATA integrity, append the data-out
 * and/or data-in integrity-check descriptors as the final segment of each
 * stream, record their offsets in the security parameters, and sign the
 * complete out-going data chain with @cap_key. No-op for other security
 * methods.
 */
static int _osd_req_finalize_data_integrity(struct osd_request *or,
	bool has_in, bool has_out, const u8 *cap_key)
{
	struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
	int ret;

	if (!osd_is_sec_alldata(sec_parms))
		return 0;

	if (has_out) {
		/* wrap the embedded out_data_integ struct as a segment */
		struct _osd_req_data_segment seg = {
			.buff = &or->out_data_integ,
			.total_bytes = sizeof(or->out_data_integ),
		};
		unsigned pad;

		or->out_data_integ.data_bytes = cpu_to_be64(
			or->out.bio ? or->out.bio->bi_size : 0);
		or->out_data_integ.set_attributes_bytes = cpu_to_be64(
			or->set_attr.total_bytes);
		or->out_data_integ.get_attributes_bytes = cpu_to_be64(
			or->enc_get_attr.total_bytes);

		sec_parms->data_out_integrity_check_offset =
			osd_req_encode_offset(or, or->out.total_bytes, &pad);

		ret = _req_append_segment(or, pad, &seg, or->out.last_seg,
					  &or->out);
		if (ret)
			return ret;
		/* seg is a stack temporary — must not remain as last_seg */
		or->out.last_seg = NULL;

		/* they are now all chained to request sign them all together */
		osd_sec_sign_data(&or->out_data_integ, or->out.req->bio,
				  cap_key);
	}

	if (has_in) {
		struct _osd_req_data_segment seg = {
			.buff = &or->in_data_integ,
			.total_bytes = sizeof(or->in_data_integ),
		};
		unsigned pad;

		sec_parms->data_in_integrity_check_offset =
			osd_req_encode_offset(or, or->in.total_bytes, &pad);

		ret = _req_append_segment(or, pad, &seg, or->in.last_seg,
					  &or->in);
		if (ret)
			return ret;

		or->in.last_seg = NULL;
	}

	return 0;
}
1213
1214/*
1215 * osd_finalize_request and helpers
1216 */
1217
/*
 * Allocate the block-layer request(s) backing this osd_request: a WRITE
 * request when there is data-out (with a chained bidi READ request when
 * there is also data-in), or a plain READ request for data-in only.
 * Returns 0 or -ENOMEM; partially allocated requests are left attached to
 * or->request for osd_end_request() to reclaim.
 */
static int _init_blk_request(struct osd_request *or,
	bool has_in, bool has_out)
{
	gfp_t flags = or->alloc_flags;
	struct scsi_device *scsi_device = or->osd_dev->scsi_device;
	struct request_queue *q = scsi_device->request_queue;
	struct request *req;
	int ret = -ENOMEM;

	/* has_out doubles as the READ/WRITE direction flag */
	req = blk_get_request(q, has_out, flags);
	if (!req)
		goto out;

	or->request = req;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->timeout = or->timeout;
	req->retries = or->retries;
	req->sense = or->sense;
	req->sense_len = 0;

	if (has_out) {
		or->out.req = req;
		if (has_in) {
			/* allocate bidi request */
			req = blk_get_request(q, READ, flags);
			if (!req) {
				OSD_DEBUG("blk_get_request for bidi failed\n");
				goto out;
			}
			req->cmd_type = REQ_TYPE_BLOCK_PC;
			or->in.req = or->request->next_rq = req;
		}
	} else if (has_in)
		or->in.req = req;

	ret = 0;
out:
	OSD_DEBUG("or=%p has_in=%d has_out=%d => %d, %p\n",
			or, has_in, has_out, ret, or->request);
	return ret;
}
1259
/*
 * Finalize an osd_request for execution: apply @options and capability
 * @cap to the CDB, allocate the backing block request(s), chain the data
 * BIOs and attribute segments, append integrity descriptors, and sign the
 * CDB with @cap_key. Must be called exactly once, after all osd_req_*
 * encode/add calls and before osd_execute_request*().
 * Returns 0 or a negative errno; on failure the request is still safe to
 * hand to osd_end_request().
 */
int osd_finalize_request(struct osd_request *or,
	u8 options, const void *cap, const u8 *cap_key)
{
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
	bool has_in, has_out;
	int ret;

	if (options & OSD_REQ_FUA)
		cdbh->options |= OSD_CDB_FUA;

	if (options & OSD_REQ_DPO)
		cdbh->options |= OSD_CDB_DPO;

	if (options & OSD_REQ_BYPASS_TIMESTAMPS)
		cdbh->timestamp_control = OSD_CDB_BYPASS_TIMESTAMPS;

	osd_set_caps(&or->cdb, cap);

	/* attribute segments count as data even without a payload bio */
	has_in = or->in.bio || or->get_attr.total_bytes;
	has_out = or->out.bio || or->set_attr.total_bytes ||
		or->enc_get_attr.total_bytes;

	ret = _init_blk_request(or, has_in, has_out);
	if (ret) {
		OSD_DEBUG("_init_blk_request failed\n");
		return ret;
	}

	/* payload BIOs go first; attribute segments are appended after */
	if (or->out.bio) {
		ret = blk_rq_append_bio(or->request->q, or->out.req,
					or->out.bio);
		if (ret) {
			OSD_DEBUG("blk_rq_append_bio out failed\n");
			return ret;
		}
		OSD_DEBUG("out bytes=%llu (bytes_req=%u)\n",
			_LLU(or->out.total_bytes), or->out.req->data_len);
	}
	if (or->in.bio) {
		ret = blk_rq_append_bio(or->request->q, or->in.req, or->in.bio);
		if (ret) {
			OSD_DEBUG("blk_rq_append_bio in failed\n");
			return ret;
		}
		OSD_DEBUG("in bytes=%llu (bytes_req=%u)\n",
			_LLU(or->in.total_bytes), or->in.req->data_len);
	}

	or->out.pad_buff = sg_out_pad_buffer;
	or->in.pad_buff = sg_in_pad_buffer;

	if (!or->attributes_mode)
		or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
	cdbh->command_specific_options |= or->attributes_mode;
	if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
		ret = _osd_req_finalize_attr_page(or);
	} else {
		/* TODO: I think that for the GET_ATTR command these 2 should
		 * be reversed to keep them in execution order (for embeded
		 * targets with low memory footprint)
		 */
		ret = _osd_req_finalize_set_attr_list(or);
		if (ret) {
			OSD_DEBUG("_osd_req_finalize_set_attr_list failed\n");
			return ret;
		}

		ret = _osd_req_finalize_get_attr_list(or);
		if (ret) {
			OSD_DEBUG("_osd_req_finalize_get_attr_list failed\n");
			return ret;
		}
	}

	/* integrity descriptors must be the last segment of each stream */
	ret = _osd_req_finalize_data_integrity(or, has_in, has_out, cap_key);
	if (ret)
		return ret;

	/* sign last: the CDB is complete at this point */
	osd_sec_sign_cdb(&or->cdb, cap_key);

	or->request->cmd = or->cdb.buff;
	or->request->cmd_len = _osd_req_cdb_len(or);

	return 0;
}
EXPORT_SYMBOL(osd_finalize_request);
1346
/* Conditional sense-dump helpers: emit via OSD_ERR only when the caller's
 * local __cur_sense_need_output flag is set (see osd_req_decode_sense_full).
 * PRINT2 indents one level below PRINT1.
 */
1347#define OSD_SENSE_PRINT1(fmt, a...) \
1348 do { \
1349 if (__cur_sense_need_output) \
1350 OSD_ERR(fmt, ##a); \
1351 } while (0)
1352
1353#define OSD_SENSE_PRINT2(fmt, a...) OSD_SENSE_PRINT1(" " fmt, ##a)
1354
1355int osd_req_decode_sense_full(struct osd_request *or,
1356 struct osd_sense_info *osi, bool silent,
1357 struct osd_obj_id *bad_obj_list __unused, int max_obj __unused,
1358 struct osd_attr *bad_attr_list, int max_attr)
1359{
1360 int sense_len, original_sense_len;
1361 struct osd_sense_info local_osi;
1362 struct scsi_sense_descriptor_based *ssdb;
1363 void *cur_descriptor;
1364#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 0)
1365 const bool __cur_sense_need_output = false;
1366#else
1367 bool __cur_sense_need_output = !silent;
1368#endif
1369
1370 if (!or->request->errors)
1371 return 0;
1372
1373 ssdb = or->request->sense;
1374 sense_len = or->request->sense_len;
1375 if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) {
1376 OSD_ERR("Block-layer returned error(0x%x) but "
1377 "sense_len(%u) || key(%d) is empty\n",
1378 or->request->errors, sense_len, ssdb->sense_key);
1379 return -EIO;
1380 }
1381
1382 if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) {
1383 OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n",
1384 ssdb->response_code, sense_len);
1385 return -EIO;
1386 }
1387
1388 osi = osi ? : &local_osi;
1389 memset(osi, 0, sizeof(*osi));
1390 osi->key = ssdb->sense_key;
1391 osi->additional_code = be16_to_cpu(ssdb->additional_sense_code);
1392 original_sense_len = ssdb->additional_sense_length + 8;
1393
1394#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 1)
1395 if (__cur_sense_need_output)
1396 __cur_sense_need_output = (osi->key > scsi_sk_recovered_error);
1397#endif
1398 OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) "
1399 "additional_code=0x%x\n",
1400 osi->key, original_sense_len, sense_len,
1401 osi->additional_code);
1402
1403 if (original_sense_len < sense_len)
1404 sense_len = original_sense_len;
1405
1406 cur_descriptor = ssdb->ssd;
1407 sense_len -= sizeof(*ssdb);
1408 while (sense_len > 0) {
1409 struct scsi_sense_descriptor *ssd = cur_descriptor;
1410 int cur_len = ssd->additional_length + 2;
1411
1412 sense_len -= cur_len;
1413
1414 if (sense_len < 0)
1415 break; /* sense was truncated */
1416
1417 switch (ssd->descriptor_type) {
1418 case scsi_sense_information:
1419 case scsi_sense_command_specific_information:
1420 {
1421 struct scsi_sense_command_specific_data_descriptor
1422 *sscd = cur_descriptor;
1423
1424 osi->command_info =
1425 get_unaligned_be64(&sscd->information) ;
1426 OSD_SENSE_PRINT2(
1427 "command_specific_information 0x%llx \n",
1428 _LLU(osi->command_info));
1429 break;
1430 }
1431 case scsi_sense_key_specific:
1432 {
1433 struct scsi_sense_key_specific_data_descriptor
1434 *ssks = cur_descriptor;
1435
1436 osi->sense_info = get_unaligned_be16(&ssks->value);
1437 OSD_SENSE_PRINT2(
1438 "sense_key_specific_information %u"
1439 "sksv_cd_bpv_bp (0x%x)\n",
1440 osi->sense_info, ssks->sksv_cd_bpv_bp);
1441 break;
1442 }
1443 case osd_sense_object_identification:
1444 { /*FIXME: Keep first not last, Store in array*/
1445 struct osd_sense_identification_data_descriptor
1446 *osidd = cur_descriptor;
1447
1448 osi->not_initiated_command_functions =
1449 le32_to_cpu(osidd->not_initiated_functions);
1450 osi->completed_command_functions =
1451 le32_to_cpu(osidd->completed_functions);
1452 osi->obj.partition = be64_to_cpu(osidd->partition_id);
1453 osi->obj.id = be64_to_cpu(osidd->object_id);
1454 OSD_SENSE_PRINT2(
1455 "object_identification pid=0x%llx oid=0x%llx\n",
1456 _LLU(osi->obj.partition), _LLU(osi->obj.id));
1457 OSD_SENSE_PRINT2(
1458 "not_initiated_bits(%x) "
1459 "completed_command_bits(%x)\n",
1460 osi->not_initiated_command_functions,
1461 osi->completed_command_functions);
1462 break;
1463 }
1464 case osd_sense_response_integrity_check:
1465 {
1466 struct osd_sense_response_integrity_check_descriptor
1467 *osricd = cur_descriptor;
1468 const unsigned len =
1469 sizeof(osricd->integrity_check_value);
1470 char key_dump[len*4 + 2]; /* 2nibbles+space+ASCII */
1471
1472 hex_dump_to_buffer(osricd->integrity_check_value, len,
1473 32, 1, key_dump, sizeof(key_dump), true);
1474 OSD_SENSE_PRINT2("response_integrity [%s]\n", key_dump);
1475 }
1476 case osd_sense_attribute_identification:
1477 {
1478 struct osd_sense_attributes_data_descriptor
1479 *osadd = cur_descriptor;
1480 int len = min(cur_len, sense_len);
1481 int i = 0;
1482 struct osd_sense_attr *pattr = osadd->sense_attrs;
1483
1484 while (len < 0) {
1485 u32 attr_page = be32_to_cpu(pattr->attr_page);
1486 u32 attr_id = be32_to_cpu(pattr->attr_id);
1487
1488 if (i++ == 0) {
1489 osi->attr.attr_page = attr_page;
1490 osi->attr.attr_id = attr_id;
1491 }
1492
1493 if (bad_attr_list && max_attr) {
1494 bad_attr_list->attr_page = attr_page;
1495 bad_attr_list->attr_id = attr_id;
1496 bad_attr_list++;
1497 max_attr--;
1498 }
1499 OSD_SENSE_PRINT2(
1500 "osd_sense_attribute_identification"
1501 "attr_page=0x%x attr_id=0x%x\n",
1502 attr_page, attr_id);
1503 }
1504 }
1505 /*These are not legal for OSD*/
1506 case scsi_sense_field_replaceable_unit:
1507 OSD_SENSE_PRINT2("scsi_sense_field_replaceable_unit\n");
1508 break;
1509 case scsi_sense_stream_commands:
1510 OSD_SENSE_PRINT2("scsi_sense_stream_commands\n");
1511 break;
1512 case scsi_sense_block_commands:
1513 OSD_SENSE_PRINT2("scsi_sense_block_commands\n");
1514 break;
1515 case scsi_sense_ata_return:
1516 OSD_SENSE_PRINT2("scsi_sense_ata_return\n");
1517 break;
1518 default:
1519 if (ssd->descriptor_type <= scsi_sense_Reserved_last)
1520 OSD_SENSE_PRINT2(
1521 "scsi_sense Reserved descriptor (0x%x)",
1522 ssd->descriptor_type);
1523 else
1524 OSD_SENSE_PRINT2(
1525 "scsi_sense Vendor descriptor (0x%x)",
1526 ssd->descriptor_type);
1527 }
1528
1529 cur_descriptor += cur_len;
1530 }
1531
1532 return (osi->key > scsi_sk_recovered_error) ? -EIO : 0;
1533}
1534EXPORT_SYMBOL(osd_req_decode_sense_full);
1535
1536/*
1537 * Implementation of osd_sec.h API
1538 * TODO: Move to a separate osd_sec.c file at a later stage.
1539 */
1540
/* The union of every OSDv1 capability bit -- used by the NOSEC
 * "allow everything" capability builder below.
 */
1541enum { OSD_SEC_CAP_V1_ALL_CAPS =
1542 OSD_SEC_CAP_APPEND | OSD_SEC_CAP_OBJ_MGMT | OSD_SEC_CAP_REMOVE |
1543 OSD_SEC_CAP_CREATE | OSD_SEC_CAP_SET_ATTR | OSD_SEC_CAP_GET_ATTR |
1544 OSD_SEC_CAP_WRITE | OSD_SEC_CAP_READ | OSD_SEC_CAP_POL_SEC |
1545 OSD_SEC_CAP_GLOBAL | OSD_SEC_CAP_DEV_MGMT
1546};
1547
/* OSDv2 adds the QUERY and multi-object bits on top of the v1 set. */
1548enum { OSD_SEC_CAP_V2_ALL_CAPS =
1549 OSD_SEC_CAP_V1_ALL_CAPS | OSD_SEC_CAP_QUERY | OSD_SEC_CAP_M_OBJECT
1550};
1551
1552void osd_sec_init_nosec_doall_caps(void *caps,
1553 const struct osd_obj_id *obj, bool is_collection, const bool is_v1)
1554{
1555 struct osd_capability *cap = caps;
1556 u8 type;
1557 u8 descriptor_type;
1558
1559 if (likely(obj->id)) {
1560 if (unlikely(is_collection)) {
1561 type = OSD_SEC_OBJ_COLLECTION;
1562 descriptor_type = is_v1 ? OSD_SEC_OBJ_DESC_OBJ :
1563 OSD_SEC_OBJ_DESC_COL;
1564 } else {
1565 type = OSD_SEC_OBJ_USER;
1566 descriptor_type = OSD_SEC_OBJ_DESC_OBJ;
1567 }
1568 WARN_ON(!obj->partition);
1569 } else {
1570 type = obj->partition ? OSD_SEC_OBJ_PARTITION :
1571 OSD_SEC_OBJ_ROOT;
1572 descriptor_type = OSD_SEC_OBJ_DESC_PAR;
1573 }
1574
1575 memset(cap, 0, sizeof(*cap));
1576
1577 cap->h.format = OSD_SEC_CAP_FORMAT_VER1;
1578 cap->h.integrity_algorithm__key_version = 0; /* MAKE_BYTE(0, 0); */
1579 cap->h.security_method = OSD_SEC_NOSEC;
1580/* cap->expiration_time;
1581 cap->AUDIT[30-10];
1582 cap->discriminator[42-30];
1583 cap->object_created_time; */
1584 cap->h.object_type = type;
1585 osd_sec_set_caps(&cap->h, OSD_SEC_CAP_V1_ALL_CAPS);
1586 cap->h.object_descriptor_type = descriptor_type;
1587 cap->od.obj_desc.policy_access_tag = 0;
1588 cap->od.obj_desc.allowed_partition_id = cpu_to_be64(obj->partition);
1589 cap->od.obj_desc.allowed_object_id = cpu_to_be64(obj->id);
1590}
1591EXPORT_SYMBOL(osd_sec_init_nosec_doall_caps);
1592
1593/* FIXME: Extract version from caps pointer.
1594 * Also Pete's target only supports caps from OSDv1 for now
1595 */
1596void osd_set_caps(struct osd_cdb *cdb, const void *caps)
1597{
1598 bool is_ver1 = true;
1599 /* NOTE: They start at same address */
1600 memcpy(&cdb->v1.caps, caps, is_ver1 ? OSDv1_CAP_LEN : OSD_CAP_LEN);
1601}
1602
/* Currently always false: with only OSD_SEC_NOSEC implemented (see the
 * empty sign stubs below), bulk data is never signed.
 */
1603bool osd_is_sec_alldata(struct osd_security_parameters *sec_parms __unused)
1604{
1605 return false;
1606}
1607
/* No-op stub: CDB signing is not implemented yet (NOSEC only). */
1608void osd_sec_sign_cdb(struct osd_cdb *ocdb __unused, const u8 *cap_key __unused)
1609{
1610}
1611
/* No-op stub: data-integrity signing is not implemented yet (NOSEC only). */
1612void osd_sec_sign_data(void *data_integ __unused,
1613 struct bio *bio __unused, const u8 *cap_key __unused)
1614{
1615}
1616
1617/*
1618 * Declared in osd_protocol.h
1619 * 4.12.5 Data-In and Data-Out buffer offsets
1620 * byte offset = mantissa * (2^(exponent+8))
1621 * Returns the smallest allowed encoded offset that contains given @offset
1622 * The actual encoded offset returned is @offset + *@padding.
1623 */
1624osd_cdb_offset __osd_encode_offset(
1625 u64 offset, unsigned *padding, int min_shift, int max_shift)
1626{
1627 u64 try_offset = -1, mod, align;
1628 osd_cdb_offset be32_offset;
1629 int shift;
1630
1631 *padding = 0;
1632 if (!offset)
1633 return 0;
1634
	/* find the smallest exponent whose mantissa fits in
	 * OSD_OFFSET_MAX_BITS bits
	 */
1635 for (shift = min_shift; shift < max_shift; ++shift) {
1636 try_offset = offset >> shift;
1637 if (try_offset < (1 << OSD_OFFSET_MAX_BITS))
1638 break;
1639 }
1640
	/* offset too large to encode within [min_shift, max_shift) */
1641 BUG_ON(shift == max_shift);
1642
	/* round the mantissa up when offset is not 2^shift aligned, and
	 * report the number of pad bytes that rounding introduces
	 */
1643 align = 1 << shift;
1644 mod = offset & (align - 1);
1645 if (mod) {
1646 *padding = align - mod;
1647 try_offset += 1;
1648 }
1649
	/* pack (exponent - 8) into the top nibble, per spec 4.12.5 above */
1650 try_offset |= ((shift - 8) & 0xf) << 28;
1651 be32_offset = cpu_to_be32((u32)try_offset);
1652
1653 OSD_DEBUG("offset=%llu mantissa=%llu exp=%d encoded=%x pad=%d\n",
1654 _LLU(offset), _LLU(try_offset & 0x0FFFFFFF), shift,
1655 be32_offset, *padding);
1656 return be32_offset;
1657}
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
new file mode 100644
index 000000000000..f8b1a749958b
--- /dev/null
+++ b/drivers/scsi/osd/osd_uld.c
@@ -0,0 +1,487 @@
1/*
2 * osd_uld.c - OSD Upper Layer Driver
3 *
4 * A Linux driver module that registers as a SCSI ULD and probes
5 * for OSD type SCSI devices.
6 * It's main function is to export osd devices to in-kernel users like
7 * osdfs and pNFS-objects-LD. It also provides one ioctl for running
8 * in Kernel tests.
9 *
10 * Copyright (C) 2008 Panasas Inc. All rights reserved.
11 *
12 * Authors:
13 * Boaz Harrosh <bharrosh@panasas.com>
14 * Benny Halevy <bhalevy@panasas.com>
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions
21 * are met:
22 *
23 * 1. Redistributions of source code must retain the above copyright
24 * notice, this list of conditions and the following disclaimer.
25 * 2. Redistributions in binary form must reproduce the above copyright
26 * notice, this list of conditions and the following disclaimer in the
27 * documentation and/or other materials provided with the distribution.
28 * 3. Neither the name of the Panasas company nor the names of its
29 * contributors may be used to endorse or promote products derived
30 * from this software without specific prior written permission.
31 *
32 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
33 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
34 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
35 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
37 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
38 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
39 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
40 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
41 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
42 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 */
44
45#include <linux/namei.h>
46#include <linux/cdev.h>
47#include <linux/fs.h>
48#include <linux/module.h>
49#include <linux/device.h>
50#include <linux/idr.h>
51#include <linux/major.h>
52
53#include <scsi/scsi.h>
54#include <scsi/scsi_driver.h>
55#include <scsi/scsi_device.h>
56#include <scsi/scsi_ioctl.h>
57
58#include <scsi/osd_initiator.h>
59#include <scsi/osd_sec.h>
60
61#include "osd_debug.h"
62
63#ifndef TYPE_OSD
64# define TYPE_OSD 0x11
65#endif
66
67#ifndef SCSI_OSD_MAJOR
68# define SCSI_OSD_MAJOR 260
69#endif
70#define SCSI_OSD_MAX_MINOR 64
71
72static const char osd_name[] = "osd";
73static const char *osd_version_string = "open-osd 0.1.0";
74const char osd_symlink[] = "scsi_osd";
75
76MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
77MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko");
78MODULE_LICENSE("GPL");
79MODULE_ALIAS_CHARDEV_MAJOR(SCSI_OSD_MAJOR);
80MODULE_ALIAS_SCSI_DEVICE(TYPE_OSD);
81
/* Per-probed-OSD state; lifetime governed by @kref (see __uld_get/__uld_put,
 * freed in __remove()).
 */
82struct osd_uld_device {
83 int minor; /* minor allocated from osd_minor_ida */
84 struct kref kref; /* last put runs __remove() */
85 struct cdev cdev; /* the /dev/osdX char device */
86 struct osd_dev od; /* handle given to in-kernel users */
87 struct gendisk *disk; /* carries the "osdX" name */
88 struct device *class_member; /* sysfs class entry */
89};
90
91static void __uld_get(struct osd_uld_device *oud);
92static void __uld_put(struct osd_uld_device *oud);
93
94/*
95 * Char Device operations
96 */
97
98static int osd_uld_open(struct inode *inode, struct file *file)
99{
100 struct osd_uld_device *oud = container_of(inode->i_cdev,
101 struct osd_uld_device, cdev);
102
103 __uld_get(oud);
104 /* cache osd_uld_device on file handle */
105 file->private_data = oud;
106 OSD_DEBUG("osd_uld_open %p\n", oud);
107 return 0;
108}
109
110static int osd_uld_release(struct inode *inode, struct file *file)
111{
112 struct osd_uld_device *oud = file->private_data;
113
114 OSD_DEBUG("osd_uld_release %p\n", file->private_data);
115 file->private_data = NULL;
116 __uld_put(oud);
117 return 0;
118}
119
120/* FIXME: Only one vector for now */
/* The single in-kernel test hook: the ioctl number it claims and its
 * handler.  Managed by osduld_register_test()/osduld_unregister_test().
 */
121unsigned g_test_ioctl;
122do_test_fn *g_do_test;
123
124int osduld_register_test(unsigned ioctl, do_test_fn *do_test)
125{
126 if (g_test_ioctl)
127 return -EINVAL;
128
129 g_test_ioctl = ioctl;
130 g_do_test = do_test;
131 return 0;
132}
133EXPORT_SYMBOL(osduld_register_test);
134
135void osduld_unregister_test(unsigned ioctl)
136{
137 if (ioctl == g_test_ioctl) {
138 g_test_ioctl = 0;
139 g_do_test = NULL;
140 }
141}
142EXPORT_SYMBOL(osduld_unregister_test);
143
144static do_test_fn *_find_ioctl(unsigned cmd)
145{
146 if (g_test_ioctl == cmd)
147 return g_do_test;
148 else
149 return NULL;
150}
151
152static long osd_uld_ioctl(struct file *file, unsigned int cmd,
153 unsigned long arg)
154{
155 struct osd_uld_device *oud = file->private_data;
156 int ret;
157 do_test_fn *do_test;
158
159 do_test = _find_ioctl(cmd);
160 if (do_test)
161 ret = do_test(&oud->od, cmd, arg);
162 else {
163 OSD_ERR("Unknown ioctl %d: osd_uld_device=%p\n", cmd, oud);
164 ret = -ENOIOCTLCMD;
165 }
166 return ret;
167}
168
/* Char-device entry points for /dev/osdX (open/close + ioctl only). */
169static const struct file_operations osd_fops = {
170 .owner = THIS_MODULE,
171 .open = osd_uld_open,
172 .release = osd_uld_release,
173 .unlocked_ioctl = osd_uld_ioctl,
174};
175
/*
 * Resolve a user-visible path (e.g. "/dev/osd0") to the osd_dev it
 * names, taking a reference on the owning osd_uld_device.
 *
 * Returns &oud->od on success or an ERR_PTR() on failure.  Callers
 * release the reference with osduld_put_device().
 */
176struct osd_dev *osduld_path_lookup(const char *path)
177{
178 struct nameidata nd;
179 struct inode *inode;
180 struct cdev *cdev;
181 struct osd_uld_device *uninitialized_var(oud);
182 int error;
183
184 if (!path || !*path) {
185 OSD_ERR("Mount with !path || !*path\n");
186 return ERR_PTR(-EINVAL);
187 }
188
189 error = path_lookup(path, LOOKUP_FOLLOW, &nd);
190 if (error) {
191 OSD_ERR("path_lookup of %s faild=>%d\n", path, error);
192 return ERR_PTR(error);
193 }
194
195 inode = nd.path.dentry->d_inode;
196 error = -EINVAL; /* Not the right device e.g osd_uld_device */
197 if (!S_ISCHR(inode->i_mode)) {
198 OSD_DEBUG("!S_ISCHR()\n");
199 goto out;
200 }
201
	/* i_cdev is only populated after the node has been opened once */
202 cdev = inode->i_cdev;
203 if (!cdev) {
204 OSD_ERR("Before mounting an OSD Based filesystem\n");
205 OSD_ERR(" user-mode must open+close the %s device\n", path);
206 OSD_ERR(" Example: bash: echo < %s\n", path);
207 goto out;
208 }
209
210 /* The Magic wand. Is it our char-dev */
211 /* TODO: Support sg devices */
212 if (cdev->owner != THIS_MODULE) {
213 OSD_ERR("Error mounting %s - is not an OSD device\n", path);
214 goto out;
215 }
216
217 oud = container_of(cdev, struct osd_uld_device, cdev);
218
	/* pin the device for the caller; dropped in osduld_put_device() */
219 __uld_get(oud);
220 error = 0;
221
222out:
223 path_put(&nd.path);
224 return error ? ERR_PTR(error) : &oud->od;
225}
226EXPORT_SYMBOL(osduld_path_lookup);
227
228void osduld_put_device(struct osd_dev *od)
229{
230 if (od) {
231 struct osd_uld_device *oud = container_of(od,
232 struct osd_uld_device, od);
233
234 __uld_put(oud);
235 }
236}
237EXPORT_SYMBOL(osduld_put_device);
238
239/*
240 * Scsi Device operations
241 */
242
243static int __detect_osd(struct osd_uld_device *oud)
244{
245 struct scsi_device *scsi_device = oud->od.scsi_device;
246 char caps[OSD_CAP_LEN];
247 int error;
248
249 /* sending a test_unit_ready as first command seems to be needed
250 * by some targets
251 */
252 OSD_DEBUG("start scsi_test_unit_ready %p %p %p\n",
253 oud, scsi_device, scsi_device->request_queue);
254 error = scsi_test_unit_ready(scsi_device, 10*HZ, 5, NULL);
255 if (error)
256 OSD_ERR("warning: scsi_test_unit_ready failed\n");
257
258 osd_sec_init_nosec_doall_caps(caps, &osd_root_object, false, true);
259 if (osd_auto_detect_ver(&oud->od, caps))
260 return -ENODEV;
261
262 return 0;
263}
264
265static struct class *osd_sysfs_class;
266static DEFINE_IDA(osd_minor_ida);
267
/*
 * Driver-model probe: attach to a TYPE_OSD scsi_device.  Allocates a
 * minor number, the refcounted osd_uld_device, a gendisk (used for the
 * "osdX" name, see FIXME below), the /dev/osdX char device and the
 * sysfs class entry, and verifies the target via __detect_osd().
 * The error labels unwind in reverse order of setup.
 */
268static int osd_probe(struct device *dev)
269{
270 struct scsi_device *scsi_device = to_scsi_device(dev);
271 struct gendisk *disk;
272 struct osd_uld_device *oud;
273 int minor;
274 int error;
275
276 if (scsi_device->type != TYPE_OSD)
277 return -ENODEV;
278
	/* ida_get_new may need a preallocation retry under memory pressure */
279 do {
280 if (!ida_pre_get(&osd_minor_ida, GFP_KERNEL))
281 return -ENODEV;
282
283 error = ida_get_new(&osd_minor_ida, &minor);
284 } while (error == -EAGAIN);
285
286 if (error)
287 return error;
288 if (minor >= SCSI_OSD_MAX_MINOR) {
289 error = -EBUSY;
290 goto err_retract_minor;
291 }
292
293 error = -ENOMEM;
294 oud = kzalloc(sizeof(*oud), GFP_KERNEL);
295 if (NULL == oud)
296 goto err_retract_minor;
297
298 kref_init(&oud->kref);
299 dev_set_drvdata(dev, oud);
300 oud->minor = minor;
301
302 /* allocate a disk and set it up */
303 /* FIXME: do we need this since sg has already done that */
304 disk = alloc_disk(1);
305 if (!disk) {
306 OSD_ERR("alloc_disk failed\n");
307 goto err_free_osd;
308 }
309 disk->major = SCSI_OSD_MAJOR;
310 disk->first_minor = oud->minor;
311 sprintf(disk->disk_name, "osd%d", oud->minor);
312 oud->disk = disk;
313
314 /* hold one more reference to the scsi_device that will get released
315 * in __release, in case a logout is happening while fs is mounted
316 */
317 scsi_device_get(scsi_device);
318 osd_dev_init(&oud->od, scsi_device);
319
320 /* Detect the OSD Version */
321 error = __detect_osd(oud);
322 if (error) {
323 OSD_ERR("osd detection failed, non-compatible OSD device\n");
324 goto err_put_disk;
325 }
326
327 /* init the char-device for communication with user-mode */
328 cdev_init(&oud->cdev, &osd_fops);
329 oud->cdev.owner = THIS_MODULE;
330 error = cdev_add(&oud->cdev,
331 MKDEV(SCSI_OSD_MAJOR, oud->minor), 1);
332 if (error) {
333 OSD_ERR("cdev_add failed\n");
334 goto err_put_disk;
335 }
336 kobject_get(&oud->cdev.kobj); /* 2nd ref see osd_remove() */
337
338 /* class_member */
339 oud->class_member = device_create(osd_sysfs_class, dev,
340 MKDEV(SCSI_OSD_MAJOR, oud->minor), "%s", disk->disk_name);
341 if (IS_ERR(oud->class_member)) {
342 OSD_ERR("class_device_create failed\n");
343 error = PTR_ERR(oud->class_member);
344 goto err_put_cdev;
345 }
346
347 dev_set_drvdata(oud->class_member, oud);
	/* symlink failure is tolerated; the device is still functional */
348 error = sysfs_create_link(&scsi_device->sdev_gendev.kobj,
349 &oud->class_member->kobj, osd_symlink);
350 if (error)
351 OSD_ERR("warning: unable to make symlink\n");
352
353 OSD_INFO("osd_probe %s\n", disk->disk_name);
354 return 0;
355
356err_put_cdev:
357 cdev_del(&oud->cdev);
358err_put_disk:
359 scsi_device_put(scsi_device);
360 put_disk(disk);
361err_free_osd:
362 dev_set_drvdata(dev, NULL);
363 kfree(oud);
364err_retract_minor:
365 ida_remove(&osd_minor_ida, minor);
366 return error;
367}
368
369static int osd_remove(struct device *dev)
370{
371 struct scsi_device *scsi_device = to_scsi_device(dev);
372 struct osd_uld_device *oud = dev_get_drvdata(dev);
373
374 if (!oud || (oud->od.scsi_device != scsi_device)) {
375 OSD_ERR("Half cooked osd-device %p,%p || %p!=%p",
376 dev, oud, oud ? oud->od.scsi_device : NULL,
377 scsi_device);
378 }
379
380 sysfs_remove_link(&oud->od.scsi_device->sdev_gendev.kobj, osd_symlink);
381
382 if (oud->class_member)
383 device_destroy(osd_sysfs_class,
384 MKDEV(SCSI_OSD_MAJOR, oud->minor));
385
386 /* We have 2 references to the cdev. One is released here
387 * and also takes down the /dev/osdX mapping. The second
388 * Will be released in __remove() after all users have released
389 * the osd_uld_device.
390 */
391 if (oud->cdev.owner)
392 cdev_del(&oud->cdev);
393
394 __uld_put(oud);
395 return 0;
396}
397
/*
 * kref release callback: final teardown once osd_remove() has run and
 * the last user (char-dev handle or in-kernel lookup) dropped its ref.
 */
398static void __remove(struct kref *kref)
399{
400 struct osd_uld_device *oud = container_of(kref,
401 struct osd_uld_device, kref);
402 struct scsi_device *scsi_device = oud->od.scsi_device;
403
404 /* now let delete the char_dev */
405 kobject_put(&oud->cdev.kobj);
406
407 osd_dev_fini(&oud->od);
	/* drops the extra reference taken in osd_probe() */
408 scsi_device_put(scsi_device);
409
	/* NOTE(review): passing NULL for %s relies on printk's "(null)"
	 * handling -- confirm that is intended.
	 */
410 OSD_INFO("osd_remove %s\n",
411 oud->disk ? oud->disk->disk_name : NULL);
412
413 if (oud->disk)
414 put_disk(oud->disk);
415
416 ida_remove(&osd_minor_ida, oud->minor);
417 kfree(oud);
418}
419
/* Take a reference on @oud; paired with __uld_put(). */
420static void __uld_get(struct osd_uld_device *oud)
421{
422 kref_get(&oud->kref);
423}
424
/* Drop a reference; the final put frees everything via __remove(). */
425static void __uld_put(struct osd_uld_device *oud)
426{
427 kref_put(&oud->kref, __remove);
428}
429
430/*
431 * Global driver and scsi registration
432 */
433
/* SCSI ULD registration: binds osd_probe/osd_remove to scanned devices. */
434static struct scsi_driver osd_driver = {
435 .owner = THIS_MODULE,
436 .gendrv = {
437 .name = osd_name,
438 .probe = osd_probe,
439 .remove = osd_remove,
440 }
441};
442
443static int __init osd_uld_init(void)
444{
445 int err;
446
447 osd_sysfs_class = class_create(THIS_MODULE, osd_symlink);
448 if (IS_ERR(osd_sysfs_class)) {
449 OSD_ERR("Unable to register sysfs class => %ld\n",
450 PTR_ERR(osd_sysfs_class));
451 return PTR_ERR(osd_sysfs_class);
452 }
453
454 err = register_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0),
455 SCSI_OSD_MAX_MINOR, osd_name);
456 if (err) {
457 OSD_ERR("Unable to register major %d for osd ULD => %d\n",
458 SCSI_OSD_MAJOR, err);
459 goto err_out;
460 }
461
462 err = scsi_register_driver(&osd_driver.gendrv);
463 if (err) {
464 OSD_ERR("scsi_register_driver failed => %d\n", err);
465 goto err_out_chrdev;
466 }
467
468 OSD_INFO("LOADED %s\n", osd_version_string);
469 return 0;
470
471err_out_chrdev:
472 unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR);
473err_out:
474 class_destroy(osd_sysfs_class);
475 return err;
476}
477
/* Module unload: undo osd_uld_init() in reverse order. */
478static void __exit osd_uld_exit(void)
479{
480 scsi_unregister_driver(&osd_driver.gendrv);
481 unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR);
482 class_destroy(osd_sysfs_class);
483 OSD_INFO("UNLOADED %s\n", osd_version_string);
484}
485
486module_init(osd_uld_init);
487module_exit(osd_uld_exit);
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 0ea78d9a37db..acb835837eec 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -280,8 +280,8 @@ static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt)
280 static int notyetprinted = 1; 280 static int notyetprinted = 1;
281 281
282 printk(KERN_WARNING 282 printk(KERN_WARNING
283 "%s:W: Warning %x (sugg. bt 0x%x, driver bt 0x%x, host bt 0x%x).\n", 283 "%s:W: Warning %x (driver bt 0x%x, host bt 0x%x).\n",
284 name, result, suggestion(result), driver_byte(result) & DRIVER_MASK, 284 name, result, driver_byte(result),
285 host_byte(result)); 285 host_byte(result));
286 if (notyetprinted) { 286 if (notyetprinted) {
287 notyetprinted = 0; 287 notyetprinted = 0;
@@ -317,18 +317,25 @@ static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt)
317 317
318 318
319/* Wakeup from interrupt */ 319/* Wakeup from interrupt */
320static void osst_sleep_done(void *data, char *sense, int result, int resid) 320static void osst_end_async(struct request *req, int update)
321{ 321{
322 struct osst_request *SRpnt = data; 322 struct osst_request *SRpnt = req->end_io_data;
323 struct osst_tape *STp = SRpnt->stp; 323 struct osst_tape *STp = SRpnt->stp;
324 struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
324 325
325 memcpy(SRpnt->sense, sense, SCSI_SENSE_BUFFERSIZE); 326 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
326 STp->buffer->cmdstat.midlevel_result = SRpnt->result = result;
327#if DEBUG 327#if DEBUG
328 STp->write_pending = 0; 328 STp->write_pending = 0;
329#endif 329#endif
330 if (SRpnt->waiting) 330 if (SRpnt->waiting)
331 complete(SRpnt->waiting); 331 complete(SRpnt->waiting);
332
333 if (SRpnt->bio) {
334 kfree(mdata->pages);
335 blk_rq_unmap_user(SRpnt->bio);
336 }
337
338 __blk_put_request(req->q, req);
332} 339}
333 340
334/* osst_request memory management */ 341/* osst_request memory management */
@@ -342,6 +349,74 @@ static void osst_release_request(struct osst_request *streq)
342 kfree(streq); 349 kfree(streq);
343} 350}
344 351
352static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
353 int cmd_len, int data_direction, void *buffer, unsigned bufflen,
354 int use_sg, int timeout, int retries)
355{
356 struct request *req;
357 struct page **pages = NULL;
358 struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
359
360 int err = 0;
361 int write = (data_direction == DMA_TO_DEVICE);
362
363 req = blk_get_request(SRpnt->stp->device->request_queue, write, GFP_KERNEL);
364 if (!req)
365 return DRIVER_ERROR << 24;
366
367 req->cmd_type = REQ_TYPE_BLOCK_PC;
368 req->cmd_flags |= REQ_QUIET;
369
370 SRpnt->bio = NULL;
371
372 if (use_sg) {
373 struct scatterlist *sg, *sgl = (struct scatterlist *)buffer;
374 int i;
375
376 pages = kzalloc(use_sg * sizeof(struct page *), GFP_KERNEL);
377 if (!pages)
378 goto free_req;
379
380 for_each_sg(sgl, sg, use_sg, i)
381 pages[i] = sg_page(sg);
382
383 mdata->null_mapped = 1;
384
385 mdata->page_order = get_order(sgl[0].length);
386 mdata->nr_entries =
387 DIV_ROUND_UP(bufflen, PAGE_SIZE << mdata->page_order);
388 mdata->offset = 0;
389
390 err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen, GFP_KERNEL);
391 if (err) {
392 kfree(pages);
393 goto free_req;
394 }
395 SRpnt->bio = req->bio;
396 mdata->pages = pages;
397
398 } else if (bufflen) {
399 err = blk_rq_map_kern(req->q, req, buffer, bufflen, GFP_KERNEL);
400 if (err)
401 goto free_req;
402 }
403
404 req->cmd_len = cmd_len;
405 memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
406 memcpy(req->cmd, cmd, req->cmd_len);
407 req->sense = SRpnt->sense;
408 req->sense_len = 0;
409 req->timeout = timeout;
410 req->retries = retries;
411 req->end_io_data = SRpnt;
412
413 blk_execute_rq_nowait(req->q, NULL, req, 1, osst_end_async);
414 return 0;
415free_req:
416 blk_put_request(req);
417 return DRIVER_ERROR << 24;
418}
419
345/* Do the scsi command. Waits until command performed if do_wait is true. 420/* Do the scsi command. Waits until command performed if do_wait is true.
346 Otherwise osst_write_behind_check() is used to check that the command 421 Otherwise osst_write_behind_check() is used to check that the command
347 has finished. */ 422 has finished. */
@@ -403,8 +478,8 @@ static struct osst_request * osst_do_scsi(struct osst_request *SRpnt, struct oss
403 STp->buffer->cmdstat.have_sense = 0; 478 STp->buffer->cmdstat.have_sense = 0;
404 STp->buffer->syscall_result = 0; 479 STp->buffer->syscall_result = 0;
405 480
406 if (scsi_execute_async(STp->device, cmd, COMMAND_SIZE(cmd[0]), direction, bp, bytes, 481 if (osst_execute(SRpnt, cmd, COMMAND_SIZE(cmd[0]), direction, bp, bytes,
407 use_sg, timeout, retries, SRpnt, osst_sleep_done, GFP_KERNEL)) 482 use_sg, timeout, retries))
408 /* could not allocate the buffer or request was too large */ 483 /* could not allocate the buffer or request was too large */
409 (STp->buffer)->syscall_result = (-EBUSY); 484 (STp->buffer)->syscall_result = (-EBUSY);
410 else if (do_wait) { 485 else if (do_wait) {
@@ -5286,11 +5361,6 @@ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
5286 struct page *page = alloc_pages(priority, (OS_FRAME_SIZE - got <= PAGE_SIZE) ? 0 : order); 5361 struct page *page = alloc_pages(priority, (OS_FRAME_SIZE - got <= PAGE_SIZE) ? 0 : order);
5287 STbuffer->sg[segs].offset = 0; 5362 STbuffer->sg[segs].offset = 0;
5288 if (page == NULL) { 5363 if (page == NULL) {
5289 if (OS_FRAME_SIZE - got <= (max_segs - segs) * b_size / 2 && order) {
5290 b_size /= 2; /* Large enough for the rest of the buffers */
5291 order--;
5292 continue;
5293 }
5294 printk(KERN_WARNING "osst :W: Failed to enlarge buffer to %d bytes.\n", 5364 printk(KERN_WARNING "osst :W: Failed to enlarge buffer to %d bytes.\n",
5295 OS_FRAME_SIZE); 5365 OS_FRAME_SIZE);
5296#if DEBUG 5366#if DEBUG
diff --git a/drivers/scsi/osst.h b/drivers/scsi/osst.h
index 5aa22740b5df..11d26c57f3f8 100644
--- a/drivers/scsi/osst.h
+++ b/drivers/scsi/osst.h
@@ -520,6 +520,7 @@ struct osst_buffer {
520 int syscall_result; 520 int syscall_result;
521 struct osst_request *last_SRpnt; 521 struct osst_request *last_SRpnt;
522 struct st_cmdstatus cmdstat; 522 struct st_cmdstatus cmdstat;
523 struct rq_map_data map_data;
523 unsigned char *b_data; 524 unsigned char *b_data;
524 os_aux_t *aux; /* onstream AUX structure at end of each block */ 525 os_aux_t *aux; /* onstream AUX structure at end of each block */
525 unsigned short use_sg; /* zero or number of s/g segments for this adapter */ 526 unsigned short use_sg; /* zero or number of s/g segments for this adapter */
@@ -634,6 +635,7 @@ struct osst_request {
634 int result; 635 int result;
635 struct osst_tape *stp; 636 struct osst_tape *stp;
636 struct completion *waiting; 637 struct completion *waiting;
638 struct bio *bio;
637}; 639};
638 640
639/* Values of write_type */ 641/* Values of write_type */
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index cbcd3f681b62..a2ef03243a2c 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -967,6 +967,110 @@ int scsi_track_queue_full(struct scsi_device *sdev, int depth)
967EXPORT_SYMBOL(scsi_track_queue_full); 967EXPORT_SYMBOL(scsi_track_queue_full);
968 968
969/** 969/**
970 * scsi_vpd_inquiry - Request a device provide us with a VPD page
971 * @sdev: The device to ask
972 * @buffer: Where to put the result
973 * @page: Which Vital Product Data to return
974 * @len: The length of the buffer
975 *
976 * This is an internal helper function. You probably want to use
977 * scsi_get_vpd_page instead.
978 *
979 * Returns 0 on success or a negative error number.
980 */
981static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
982 u8 page, unsigned len)
983{
984 int result;
985 unsigned char cmd[16];
986
987 cmd[0] = INQUIRY;
988 cmd[1] = 1; /* EVPD */
989 cmd[2] = page;
990 cmd[3] = len >> 8;
991 cmd[4] = len & 0xff;
992 cmd[5] = 0; /* Control byte */
993
994 /*
995 * I'm not convinced we need to try quite this hard to get VPD, but
996 * all the existing users tried this hard.
997 */
998 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
999 len + 4, NULL, 30 * HZ, 3, NULL);
1000 if (result)
1001 return result;
1002
1003 /* Sanity check that we got the page back that we asked for */
1004 if (buffer[1] != page)
1005 return -EIO;
1006
1007 return 0;
1008}
1009
1010/**
1011 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
1012 * @sdev: The device to ask
1013 * @page: Which Vital Product Data to return
1014 *
1015 * SCSI devices may optionally supply Vital Product Data. Each 'page'
1016 * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
1017 * If the device supports this VPD page, this routine returns a pointer
1018 * to a buffer containing the data from that page. The caller is
1019 * responsible for calling kfree() on this pointer when it is no longer
1020 * needed. If we cannot retrieve the VPD page this routine returns %NULL.
1021 */
1022unsigned char *scsi_get_vpd_page(struct scsi_device *sdev, u8 page)
1023{
1024 int i, result;
1025 unsigned int len;
1026 unsigned char *buf = kmalloc(259, GFP_KERNEL);
1027
1028 if (!buf)
1029 return NULL;
1030
1031 /* Ask for all the pages supported by this device */
1032 result = scsi_vpd_inquiry(sdev, buf, 0, 255);
1033 if (result)
1034 goto fail;
1035
1036 /* If the user actually wanted this page, we can skip the rest */
1037 if (page == 0)
1038 return buf;
1039
1040 for (i = 0; i < buf[3]; i++)
1041 if (buf[i + 4] == page)
1042 goto found;
1043 /* The device claims it doesn't support the requested page */
1044 goto fail;
1045
1046 found:
1047 result = scsi_vpd_inquiry(sdev, buf, page, 255);
1048 if (result)
1049 goto fail;
1050
1051 /*
1052 * Some pages are longer than 255 bytes. The actual length of
1053 * the page is returned in the header.
1054 */
1055 len = (buf[2] << 8) | buf[3];
1056 if (len <= 255)
1057 return buf;
1058
1059 kfree(buf);
1060 buf = kmalloc(len + 4, GFP_KERNEL);
1061 result = scsi_vpd_inquiry(sdev, buf, page, len);
1062 if (result)
1063 goto fail;
1064
1065 return buf;
1066
1067 fail:
1068 kfree(buf);
1069 return NULL;
1070}
1071EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
1072
1073/**
970 * scsi_device_get - get an additional reference to a scsi_device 1074 * scsi_device_get - get an additional reference to a scsi_device
971 * @sdev: device to get a reference to 1075 * @sdev: device to get a reference to
972 * 1076 *
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 6eebd0bbe8a8..213123b0486b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -40,6 +40,9 @@
40#include <linux/moduleparam.h> 40#include <linux/moduleparam.h>
41#include <linux/scatterlist.h> 41#include <linux/scatterlist.h>
42#include <linux/blkdev.h> 42#include <linux/blkdev.h>
43#include <linux/crc-t10dif.h>
44
45#include <net/checksum.h>
43 46
44#include <scsi/scsi.h> 47#include <scsi/scsi.h>
45#include <scsi/scsi_cmnd.h> 48#include <scsi/scsi_cmnd.h>
@@ -48,8 +51,7 @@
48#include <scsi/scsicam.h> 51#include <scsi/scsicam.h>
49#include <scsi/scsi_eh.h> 52#include <scsi/scsi_eh.h>
50 53
51#include <linux/stat.h> 54#include "sd.h"
52
53#include "scsi_logging.h" 55#include "scsi_logging.h"
54 56
55#define SCSI_DEBUG_VERSION "1.81" 57#define SCSI_DEBUG_VERSION "1.81"
@@ -95,6 +97,10 @@ static const char * scsi_debug_version_date = "20070104";
95#define DEF_FAKE_RW 0 97#define DEF_FAKE_RW 0
96#define DEF_VPD_USE_HOSTNO 1 98#define DEF_VPD_USE_HOSTNO 1
97#define DEF_SECTOR_SIZE 512 99#define DEF_SECTOR_SIZE 512
100#define DEF_DIX 0
101#define DEF_DIF 0
102#define DEF_GUARD 0
103#define DEF_ATO 1
98 104
99/* bit mask values for scsi_debug_opts */ 105/* bit mask values for scsi_debug_opts */
100#define SCSI_DEBUG_OPT_NOISE 1 106#define SCSI_DEBUG_OPT_NOISE 1
@@ -102,6 +108,8 @@ static const char * scsi_debug_version_date = "20070104";
102#define SCSI_DEBUG_OPT_TIMEOUT 4 108#define SCSI_DEBUG_OPT_TIMEOUT 4
103#define SCSI_DEBUG_OPT_RECOVERED_ERR 8 109#define SCSI_DEBUG_OPT_RECOVERED_ERR 8
104#define SCSI_DEBUG_OPT_TRANSPORT_ERR 16 110#define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
111#define SCSI_DEBUG_OPT_DIF_ERR 32
112#define SCSI_DEBUG_OPT_DIX_ERR 64
105/* When "every_nth" > 0 then modulo "every_nth" commands: 113/* When "every_nth" > 0 then modulo "every_nth" commands:
106 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set 114 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
107 * - a RECOVERED_ERROR is simulated on successful read and write 115 * - a RECOVERED_ERROR is simulated on successful read and write
@@ -144,6 +152,10 @@ static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
144static int scsi_debug_fake_rw = DEF_FAKE_RW; 152static int scsi_debug_fake_rw = DEF_FAKE_RW;
145static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO; 153static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
146static int scsi_debug_sector_size = DEF_SECTOR_SIZE; 154static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
155static int scsi_debug_dix = DEF_DIX;
156static int scsi_debug_dif = DEF_DIF;
157static int scsi_debug_guard = DEF_GUARD;
158static int scsi_debug_ato = DEF_ATO;
147 159
148static int scsi_debug_cmnd_count = 0; 160static int scsi_debug_cmnd_count = 0;
149 161
@@ -204,11 +216,15 @@ struct sdebug_queued_cmd {
204static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE]; 216static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
205 217
206static unsigned char * fake_storep; /* ramdisk storage */ 218static unsigned char * fake_storep; /* ramdisk storage */
219static unsigned char *dif_storep; /* protection info */
207 220
208static int num_aborts = 0; 221static int num_aborts = 0;
209static int num_dev_resets = 0; 222static int num_dev_resets = 0;
210static int num_bus_resets = 0; 223static int num_bus_resets = 0;
211static int num_host_resets = 0; 224static int num_host_resets = 0;
225static int dix_writes;
226static int dix_reads;
227static int dif_errors;
212 228
213static DEFINE_SPINLOCK(queued_arr_lock); 229static DEFINE_SPINLOCK(queued_arr_lock);
214static DEFINE_RWLOCK(atomic_rw); 230static DEFINE_RWLOCK(atomic_rw);
@@ -217,6 +233,11 @@ static char sdebug_proc_name[] = "scsi_debug";
217 233
218static struct bus_type pseudo_lld_bus; 234static struct bus_type pseudo_lld_bus;
219 235
236static inline sector_t dif_offset(sector_t sector)
237{
238 return sector << 3;
239}
240
220static struct device_driver sdebug_driverfs_driver = { 241static struct device_driver sdebug_driverfs_driver = {
221 .name = sdebug_proc_name, 242 .name = sdebug_proc_name,
222 .bus = &pseudo_lld_bus, 243 .bus = &pseudo_lld_bus,
@@ -225,6 +246,9 @@ static struct device_driver sdebug_driverfs_driver = {
225static const int check_condition_result = 246static const int check_condition_result =
226 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 247 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
227 248
249static const int illegal_condition_result =
250 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
251
228static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, 252static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
229 0, 0, 0x2, 0x4b}; 253 0, 0, 0x2, 0x4b};
230static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, 254static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
@@ -726,7 +750,12 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
726 } else if (0x86 == cmd[2]) { /* extended inquiry */ 750 } else if (0x86 == cmd[2]) { /* extended inquiry */
727 arr[1] = cmd[2]; /*sanity */ 751 arr[1] = cmd[2]; /*sanity */
728 arr[3] = 0x3c; /* number of following entries */ 752 arr[3] = 0x3c; /* number of following entries */
729 arr[4] = 0x0; /* no protection stuff */ 753 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
754 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
755 else if (scsi_debug_dif)
756 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
757 else
758 arr[4] = 0x0; /* no protection stuff */
730 arr[5] = 0x7; /* head of q, ordered + simple q's */ 759 arr[5] = 0x7; /* head of q, ordered + simple q's */
731 } else if (0x87 == cmd[2]) { /* mode page policy */ 760 } else if (0x87 == cmd[2]) { /* mode page policy */
732 arr[1] = cmd[2]; /*sanity */ 761 arr[1] = cmd[2]; /*sanity */
@@ -767,6 +796,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
767 arr[2] = scsi_debug_scsi_level; 796 arr[2] = scsi_debug_scsi_level;
768 arr[3] = 2; /* response_data_format==2 */ 797 arr[3] = 2; /* response_data_format==2 */
769 arr[4] = SDEBUG_LONG_INQ_SZ - 5; 798 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
799 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
770 if (0 == scsi_debug_vpd_use_hostno) 800 if (0 == scsi_debug_vpd_use_hostno)
771 arr[5] = 0x10; /* claim: implicit TGPS */ 801 arr[5] = 0x10; /* claim: implicit TGPS */
772 arr[6] = 0x10; /* claim: MultiP */ 802 arr[6] = 0x10; /* claim: MultiP */
@@ -915,6 +945,12 @@ static int resp_readcap16(struct scsi_cmnd * scp,
915 arr[9] = (scsi_debug_sector_size >> 16) & 0xff; 945 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
916 arr[10] = (scsi_debug_sector_size >> 8) & 0xff; 946 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
917 arr[11] = scsi_debug_sector_size & 0xff; 947 arr[11] = scsi_debug_sector_size & 0xff;
948
949 if (scsi_debug_dif) {
950 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
951 arr[12] |= 1; /* PROT_EN */
952 }
953
918 return fill_from_dev_buffer(scp, arr, 954 return fill_from_dev_buffer(scp, arr,
919 min(alloc_len, SDEBUG_READCAP16_ARR_SZ)); 955 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
920} 956}
@@ -1066,6 +1102,10 @@ static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1066 ctrl_m_pg[2] |= 0x4; 1102 ctrl_m_pg[2] |= 0x4;
1067 else 1103 else
1068 ctrl_m_pg[2] &= ~0x4; 1104 ctrl_m_pg[2] &= ~0x4;
1105
1106 if (scsi_debug_ato)
1107 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1108
1069 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg)); 1109 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1070 if (1 == pcontrol) 1110 if (1 == pcontrol)
1071 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg)); 1111 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
@@ -1536,6 +1576,87 @@ static int do_device_access(struct scsi_cmnd *scmd,
1536 return ret; 1576 return ret;
1537} 1577}
1538 1578
1579static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1580 unsigned int sectors)
1581{
1582 unsigned int i, resid;
1583 struct scatterlist *psgl;
1584 struct sd_dif_tuple *sdt;
1585 sector_t sector;
1586 sector_t tmp_sec = start_sec;
1587 void *paddr;
1588
1589 start_sec = do_div(tmp_sec, sdebug_store_sectors);
1590
1591 sdt = (struct sd_dif_tuple *)(dif_storep + dif_offset(start_sec));
1592
1593 for (i = 0 ; i < sectors ; i++) {
1594 u16 csum;
1595
1596 if (sdt[i].app_tag == 0xffff)
1597 continue;
1598
1599 sector = start_sec + i;
1600
1601 switch (scsi_debug_guard) {
1602 case 1:
1603 csum = ip_compute_csum(fake_storep +
1604 sector * scsi_debug_sector_size,
1605 scsi_debug_sector_size);
1606 break;
1607 case 0:
1608 csum = crc_t10dif(fake_storep +
1609 sector * scsi_debug_sector_size,
1610 scsi_debug_sector_size);
1611 csum = cpu_to_be16(csum);
1612 break;
1613 default:
1614 BUG();
1615 }
1616
1617 if (sdt[i].guard_tag != csum) {
1618 printk(KERN_ERR "%s: GUARD check failed on sector %lu" \
1619 " rcvd 0x%04x, data 0x%04x\n", __func__,
1620 (unsigned long)sector,
1621 be16_to_cpu(sdt[i].guard_tag),
1622 be16_to_cpu(csum));
1623 dif_errors++;
1624 return 0x01;
1625 }
1626
1627 if (scsi_debug_dif != SD_DIF_TYPE3_PROTECTION &&
1628 be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) {
1629 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1630 __func__, (unsigned long)sector);
1631 dif_errors++;
1632 return 0x03;
1633 }
1634 }
1635
1636 resid = sectors * 8; /* Bytes of protection data to copy into sgl */
1637 sector = start_sec;
1638
1639 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1640 int len = min(psgl->length, resid);
1641
1642 paddr = kmap_atomic(sg_page(psgl), KM_IRQ0) + psgl->offset;
1643 memcpy(paddr, dif_storep + dif_offset(sector), len);
1644
1645 sector += len >> 3;
1646 if (sector >= sdebug_store_sectors) {
1647 /* Force wrap */
1648 tmp_sec = sector;
1649 sector = do_div(tmp_sec, sdebug_store_sectors);
1650 }
1651 resid -= len;
1652 kunmap_atomic(paddr, KM_IRQ0);
1653 }
1654
1655 dix_reads++;
1656
1657 return 0;
1658}
1659
1539static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, 1660static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1540 unsigned int num, struct sdebug_dev_info *devip) 1661 unsigned int num, struct sdebug_dev_info *devip)
1541{ 1662{
@@ -1563,12 +1684,162 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1563 } 1684 }
1564 return check_condition_result; 1685 return check_condition_result;
1565 } 1686 }
1687
1688 /* DIX + T10 DIF */
1689 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1690 int prot_ret = prot_verify_read(SCpnt, lba, num);
1691
1692 if (prot_ret) {
1693 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1694 return illegal_condition_result;
1695 }
1696 }
1697
1566 read_lock_irqsave(&atomic_rw, iflags); 1698 read_lock_irqsave(&atomic_rw, iflags);
1567 ret = do_device_access(SCpnt, devip, lba, num, 0); 1699 ret = do_device_access(SCpnt, devip, lba, num, 0);
1568 read_unlock_irqrestore(&atomic_rw, iflags); 1700 read_unlock_irqrestore(&atomic_rw, iflags);
1569 return ret; 1701 return ret;
1570} 1702}
1571 1703
1704void dump_sector(unsigned char *buf, int len)
1705{
1706 int i, j;
1707
1708 printk(KERN_ERR ">>> Sector Dump <<<\n");
1709
1710 for (i = 0 ; i < len ; i += 16) {
1711 printk(KERN_ERR "%04d: ", i);
1712
1713 for (j = 0 ; j < 16 ; j++) {
1714 unsigned char c = buf[i+j];
1715 if (c >= 0x20 && c < 0x7e)
1716 printk(" %c ", buf[i+j]);
1717 else
1718 printk("%02x ", buf[i+j]);
1719 }
1720
1721 printk("\n");
1722 }
1723}
1724
1725static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1726 unsigned int sectors)
1727{
1728 int i, j, ret;
1729 struct sd_dif_tuple *sdt;
1730 struct scatterlist *dsgl = scsi_sglist(SCpnt);
1731 struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1732 void *daddr, *paddr;
1733 sector_t tmp_sec = start_sec;
1734 sector_t sector;
1735 int ppage_offset;
1736 unsigned short csum;
1737
1738 sector = do_div(tmp_sec, sdebug_store_sectors);
1739
1740 if (((SCpnt->cmnd[1] >> 5) & 7) != 1) {
1741 printk(KERN_WARNING "scsi_debug: WRPROTECT != 1\n");
1742 return 0;
1743 }
1744
1745 BUG_ON(scsi_sg_count(SCpnt) == 0);
1746 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1747
1748 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset;
1749 ppage_offset = 0;
1750
1751 /* For each data page */
1752 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1753 daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset;
1754
1755 /* For each sector-sized chunk in data page */
1756 for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
1757
1758 /* If we're at the end of the current
1759 * protection page advance to the next one
1760 */
1761 if (ppage_offset >= psgl->length) {
1762 kunmap_atomic(paddr, KM_IRQ1);
1763 psgl = sg_next(psgl);
1764 BUG_ON(psgl == NULL);
1765 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1)
1766 + psgl->offset;
1767 ppage_offset = 0;
1768 }
1769
1770 sdt = paddr + ppage_offset;
1771
1772 switch (scsi_debug_guard) {
1773 case 1:
1774 csum = ip_compute_csum(daddr,
1775 scsi_debug_sector_size);
1776 break;
1777 case 0:
1778 csum = cpu_to_be16(crc_t10dif(daddr,
1779 scsi_debug_sector_size));
1780 break;
1781 default:
1782 BUG();
1783 ret = 0;
1784 goto out;
1785 }
1786
1787 if (sdt->guard_tag != csum) {
1788 printk(KERN_ERR
1789 "%s: GUARD check failed on sector %lu " \
1790 "rcvd 0x%04x, calculated 0x%04x\n",
1791 __func__, (unsigned long)sector,
1792 be16_to_cpu(sdt->guard_tag),
1793 be16_to_cpu(csum));
1794 ret = 0x01;
1795 dump_sector(daddr, scsi_debug_sector_size);
1796 goto out;
1797 }
1798
1799 if (scsi_debug_dif != SD_DIF_TYPE3_PROTECTION &&
1800 be32_to_cpu(sdt->ref_tag)
1801 != (start_sec & 0xffffffff)) {
1802 printk(KERN_ERR
1803 "%s: REF check failed on sector %lu\n",
1804 __func__, (unsigned long)sector);
1805 ret = 0x03;
1806 dump_sector(daddr, scsi_debug_sector_size);
1807 goto out;
1808 }
1809
1810 /* Would be great to copy this in bigger
1811 * chunks. However, for the sake of
1812 * correctness we need to verify each sector
1813 * before writing it to "stable" storage
1814 */
1815 memcpy(dif_storep + dif_offset(sector), sdt, 8);
1816
1817 sector++;
1818
1819 if (sector == sdebug_store_sectors)
1820 sector = 0; /* Force wrap */
1821
1822 start_sec++;
1823 daddr += scsi_debug_sector_size;
1824 ppage_offset += sizeof(struct sd_dif_tuple);
1825 }
1826
1827 kunmap_atomic(daddr, KM_IRQ0);
1828 }
1829
1830 kunmap_atomic(paddr, KM_IRQ1);
1831
1832 dix_writes++;
1833
1834 return 0;
1835
1836out:
1837 dif_errors++;
1838 kunmap_atomic(daddr, KM_IRQ0);
1839 kunmap_atomic(paddr, KM_IRQ1);
1840 return ret;
1841}
1842
1572static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, 1843static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
1573 unsigned int num, struct sdebug_dev_info *devip) 1844 unsigned int num, struct sdebug_dev_info *devip)
1574{ 1845{
@@ -1579,6 +1850,16 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
1579 if (ret) 1850 if (ret)
1580 return ret; 1851 return ret;
1581 1852
1853 /* DIX + T10 DIF */
1854 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1855 int prot_ret = prot_verify_write(SCpnt, lba, num);
1856
1857 if (prot_ret) {
1858 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
1859 return illegal_condition_result;
1860 }
1861 }
1862
1582 write_lock_irqsave(&atomic_rw, iflags); 1863 write_lock_irqsave(&atomic_rw, iflags);
1583 ret = do_device_access(SCpnt, devip, lba, num, 1); 1864 ret = do_device_access(SCpnt, devip, lba, num, 1);
1584 write_unlock_irqrestore(&atomic_rw, iflags); 1865 write_unlock_irqrestore(&atomic_rw, iflags);
@@ -2095,6 +2376,10 @@ module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2095module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int, 2376module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2096 S_IRUGO | S_IWUSR); 2377 S_IRUGO | S_IWUSR);
2097module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO); 2378module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2379module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2380module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2381module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2382module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2098 2383
2099MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); 2384MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2100MODULE_DESCRIPTION("SCSI debug adapter driver"); 2385MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2117,7 +2402,10 @@ MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2117MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); 2402MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2118MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); 2403MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2119MODULE_PARM_DESC(sector_size, "hardware sector size in bytes (def=512)"); 2404MODULE_PARM_DESC(sector_size, "hardware sector size in bytes (def=512)");
2120 2405MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2406MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2407MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2408MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2121 2409
2122static char sdebug_info[256]; 2410static char sdebug_info[256];
2123 2411
@@ -2164,14 +2452,14 @@ static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **sta
2164 "delay=%d, max_luns=%d, scsi_level=%d\n" 2452 "delay=%d, max_luns=%d, scsi_level=%d\n"
2165 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n" 2453 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2166 "number of aborts=%d, device_reset=%d, bus_resets=%d, " 2454 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2167 "host_resets=%d\n", 2455 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2168 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts, 2456 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2169 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth, 2457 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2170 scsi_debug_cmnd_count, scsi_debug_delay, 2458 scsi_debug_cmnd_count, scsi_debug_delay,
2171 scsi_debug_max_luns, scsi_debug_scsi_level, 2459 scsi_debug_max_luns, scsi_debug_scsi_level,
2172 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads, 2460 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2173 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets, 2461 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2174 num_host_resets); 2462 num_host_resets, dix_reads, dix_writes, dif_errors);
2175 if (pos < offset) { 2463 if (pos < offset) {
2176 len = 0; 2464 len = 0;
2177 begin = pos; 2465 begin = pos;
@@ -2452,6 +2740,31 @@ static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
2452} 2740}
2453DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL); 2741DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
2454 2742
2743static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
2744{
2745 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
2746}
2747DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);
2748
2749static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
2750{
2751 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
2752}
2753DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
2754
2755static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
2756{
2757 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
2758}
2759DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
2760
2761static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
2762{
2763 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
2764}
2765DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
2766
2767
2455/* Note: The following function creates attribute files in the 2768/* Note: The following function creates attribute files in the
2456 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these 2769 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
2457 files (over those found in the /sys/module/scsi_debug/parameters 2770 files (over those found in the /sys/module/scsi_debug/parameters
@@ -2478,11 +2791,19 @@ static int do_create_driverfs_files(void)
2478 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb); 2791 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
2479 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno); 2792 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
2480 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size); 2793 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
2794 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
2795 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
2796 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
2797 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
2481 return ret; 2798 return ret;
2482} 2799}
2483 2800
2484static void do_remove_driverfs_files(void) 2801static void do_remove_driverfs_files(void)
2485{ 2802{
2803 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
2804 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
2805 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
2806 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
2486 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size); 2807 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
2487 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno); 2808 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
2488 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb); 2809 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
@@ -2526,11 +2847,33 @@ static int __init scsi_debug_init(void)
2526 case 4096: 2847 case 4096:
2527 break; 2848 break;
2528 default: 2849 default:
2529 printk(KERN_ERR "scsi_debug_init: invalid sector_size %u\n", 2850 printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
2530 scsi_debug_sector_size); 2851 scsi_debug_sector_size);
2531 return -EINVAL; 2852 return -EINVAL;
2532 } 2853 }
2533 2854
2855 switch (scsi_debug_dif) {
2856
2857 case SD_DIF_TYPE0_PROTECTION:
2858 case SD_DIF_TYPE1_PROTECTION:
2859 case SD_DIF_TYPE3_PROTECTION:
2860 break;
2861
2862 default:
2863 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1 or 3\n");
2864 return -EINVAL;
2865 }
2866
2867 if (scsi_debug_guard > 1) {
2868 printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
2869 return -EINVAL;
2870 }
2871
2872 if (scsi_debug_ato > 1) {
2873 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
2874 return -EINVAL;
2875 }
2876
2534 if (scsi_debug_dev_size_mb < 1) 2877 if (scsi_debug_dev_size_mb < 1)
2535 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ 2878 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
2536 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576; 2879 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
@@ -2563,6 +2906,24 @@ static int __init scsi_debug_init(void)
2563 if (scsi_debug_num_parts > 0) 2906 if (scsi_debug_num_parts > 0)
2564 sdebug_build_parts(fake_storep, sz); 2907 sdebug_build_parts(fake_storep, sz);
2565 2908
2909 if (scsi_debug_dif) {
2910 int dif_size;
2911
2912 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
2913 dif_storep = vmalloc(dif_size);
2914
2915 printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
2916 dif_size, dif_storep);
2917
2918 if (dif_storep == NULL) {
2919 printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
2920 ret = -ENOMEM;
2921 goto free_vm;
2922 }
2923
2924 memset(dif_storep, 0xff, dif_size);
2925 }
2926
2566 ret = device_register(&pseudo_primary); 2927 ret = device_register(&pseudo_primary);
2567 if (ret < 0) { 2928 if (ret < 0) {
2568 printk(KERN_WARNING "scsi_debug: device_register error: %d\n", 2929 printk(KERN_WARNING "scsi_debug: device_register error: %d\n",
@@ -2615,6 +2976,8 @@ bus_unreg:
2615dev_unreg: 2976dev_unreg:
2616 device_unregister(&pseudo_primary); 2977 device_unregister(&pseudo_primary);
2617free_vm: 2978free_vm:
2979 if (dif_storep)
2980 vfree(dif_storep);
2618 vfree(fake_storep); 2981 vfree(fake_storep);
2619 2982
2620 return ret; 2983 return ret;
@@ -2632,6 +2995,9 @@ static void __exit scsi_debug_exit(void)
2632 bus_unregister(&pseudo_lld_bus); 2995 bus_unregister(&pseudo_lld_bus);
2633 device_unregister(&pseudo_primary); 2996 device_unregister(&pseudo_primary);
2634 2997
2998 if (dif_storep)
2999 vfree(dif_storep);
3000
2635 vfree(fake_storep); 3001 vfree(fake_storep);
2636} 3002}
2637 3003
@@ -2732,6 +3098,8 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
2732 struct sdebug_dev_info *devip = NULL; 3098 struct sdebug_dev_info *devip = NULL;
2733 int inj_recovered = 0; 3099 int inj_recovered = 0;
2734 int inj_transport = 0; 3100 int inj_transport = 0;
3101 int inj_dif = 0;
3102 int inj_dix = 0;
2735 int delay_override = 0; 3103 int delay_override = 0;
2736 3104
2737 scsi_set_resid(SCpnt, 0); 3105 scsi_set_resid(SCpnt, 0);
@@ -2769,6 +3137,10 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
2769 inj_recovered = 1; /* to reads and writes below */ 3137 inj_recovered = 1; /* to reads and writes below */
2770 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts) 3138 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
2771 inj_transport = 1; /* to reads and writes below */ 3139 inj_transport = 1; /* to reads and writes below */
3140 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3141 inj_dif = 1; /* to reads and writes below */
3142 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3143 inj_dix = 1; /* to reads and writes below */
2772 } 3144 }
2773 3145
2774 if (devip->wlun) { 3146 if (devip->wlun) {
@@ -2870,6 +3242,12 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
2870 mk_sense_buffer(devip, ABORTED_COMMAND, 3242 mk_sense_buffer(devip, ABORTED_COMMAND,
2871 TRANSPORT_PROBLEM, ACK_NAK_TO); 3243 TRANSPORT_PROBLEM, ACK_NAK_TO);
2872 errsts = check_condition_result; 3244 errsts = check_condition_result;
3245 } else if (inj_dif && (0 == errsts)) {
3246 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3247 errsts = illegal_condition_result;
3248 } else if (inj_dix && (0 == errsts)) {
3249 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3250 errsts = illegal_condition_result;
2873 } 3251 }
2874 break; 3252 break;
2875 case REPORT_LUNS: /* mandatory, ignore unit attention */ 3253 case REPORT_LUNS: /* mandatory, ignore unit attention */
@@ -2894,6 +3272,12 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
2894 mk_sense_buffer(devip, RECOVERED_ERROR, 3272 mk_sense_buffer(devip, RECOVERED_ERROR,
2895 THRESHOLD_EXCEEDED, 0); 3273 THRESHOLD_EXCEEDED, 0);
2896 errsts = check_condition_result; 3274 errsts = check_condition_result;
3275 } else if (inj_dif && (0 == errsts)) {
3276 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3277 errsts = illegal_condition_result;
3278 } else if (inj_dix && (0 == errsts)) {
3279 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3280 errsts = illegal_condition_result;
2897 } 3281 }
2898 break; 3282 break;
2899 case MODE_SENSE: 3283 case MODE_SENSE:
@@ -2982,6 +3366,7 @@ static int sdebug_driver_probe(struct device * dev)
2982 int error = 0; 3366 int error = 0;
2983 struct sdebug_host_info *sdbg_host; 3367 struct sdebug_host_info *sdbg_host;
2984 struct Scsi_Host *hpnt; 3368 struct Scsi_Host *hpnt;
3369 int host_prot;
2985 3370
2986 sdbg_host = to_sdebug_host(dev); 3371 sdbg_host = to_sdebug_host(dev);
2987 3372
@@ -3000,6 +3385,50 @@ static int sdebug_driver_probe(struct device * dev)
3000 hpnt->max_id = scsi_debug_num_tgts; 3385 hpnt->max_id = scsi_debug_num_tgts;
3001 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */ 3386 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
3002 3387
3388 host_prot = 0;
3389
3390 switch (scsi_debug_dif) {
3391
3392 case SD_DIF_TYPE1_PROTECTION:
3393 host_prot = SHOST_DIF_TYPE1_PROTECTION;
3394 if (scsi_debug_dix)
3395 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
3396 break;
3397
3398 case SD_DIF_TYPE2_PROTECTION:
3399 host_prot = SHOST_DIF_TYPE2_PROTECTION;
3400 if (scsi_debug_dix)
3401 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
3402 break;
3403
3404 case SD_DIF_TYPE3_PROTECTION:
3405 host_prot = SHOST_DIF_TYPE3_PROTECTION;
3406 if (scsi_debug_dix)
3407 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
3408 break;
3409
3410 default:
3411 if (scsi_debug_dix)
3412 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
3413 break;
3414 }
3415
3416 scsi_host_set_prot(hpnt, host_prot);
3417
3418 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
3419 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
3420 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
3421 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
3422 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
3423 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
3424 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
3425 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
3426
3427 if (scsi_debug_guard == 1)
3428 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
3429 else
3430 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
3431
3003 error = scsi_add_host(hpnt, &sdbg_host->dev); 3432 error = scsi_add_host(hpnt, &sdbg_host->dev);
3004 if (error) { 3433 if (error) {
3005 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__); 3434 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index ad6a1370761e..0c2c73be1974 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1441,6 +1441,11 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
1441 } 1441 }
1442} 1442}
1443 1443
1444static void eh_lock_door_done(struct request *req, int uptodate)
1445{
1446 __blk_put_request(req->q, req);
1447}
1448
1444/** 1449/**
1445 * scsi_eh_lock_door - Prevent medium removal for the specified device 1450 * scsi_eh_lock_door - Prevent medium removal for the specified device
1446 * @sdev: SCSI device to prevent medium removal 1451 * @sdev: SCSI device to prevent medium removal
@@ -1463,19 +1468,28 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
1463 */ 1468 */
1464static void scsi_eh_lock_door(struct scsi_device *sdev) 1469static void scsi_eh_lock_door(struct scsi_device *sdev)
1465{ 1470{
1466 unsigned char cmnd[MAX_COMMAND_SIZE]; 1471 struct request *req;
1467 1472
1468 cmnd[0] = ALLOW_MEDIUM_REMOVAL; 1473 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
1469 cmnd[1] = 0; 1474 if (!req)
1470 cmnd[2] = 0; 1475 return;
1471 cmnd[3] = 0;
1472 cmnd[4] = SCSI_REMOVAL_PREVENT;
1473 cmnd[5] = 0;
1474 1476
1475 scsi_execute_async(sdev, cmnd, 6, DMA_NONE, NULL, 0, 0, 10 * HZ, 1477 req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
1476 5, NULL, NULL, GFP_KERNEL); 1478 req->cmd[1] = 0;
1477} 1479 req->cmd[2] = 0;
1480 req->cmd[3] = 0;
1481 req->cmd[4] = SCSI_REMOVAL_PREVENT;
1482 req->cmd[5] = 0;
1478 1483
1484 req->cmd_len = COMMAND_SIZE(req->cmd[0]);
1485
1486 req->cmd_type = REQ_TYPE_BLOCK_PC;
1487 req->cmd_flags |= REQ_QUIET;
1488 req->timeout = 10 * HZ;
1489 req->retries = 5;
1490
1491 blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
1492}
1479 1493
1480/** 1494/**
1481 * scsi_restart_operations - restart io operations to the specified host. 1495 * scsi_restart_operations - restart io operations to the specified host.
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b82ffd90632e..4b13e36d3aa0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -277,196 +277,6 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
277} 277}
278EXPORT_SYMBOL(scsi_execute_req); 278EXPORT_SYMBOL(scsi_execute_req);
279 279
280struct scsi_io_context {
281 void *data;
282 void (*done)(void *data, char *sense, int result, int resid);
283 char sense[SCSI_SENSE_BUFFERSIZE];
284};
285
286static struct kmem_cache *scsi_io_context_cache;
287
288static void scsi_end_async(struct request *req, int uptodate)
289{
290 struct scsi_io_context *sioc = req->end_io_data;
291
292 if (sioc->done)
293 sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
294
295 kmem_cache_free(scsi_io_context_cache, sioc);
296 __blk_put_request(req->q, req);
297}
298
299static int scsi_merge_bio(struct request *rq, struct bio *bio)
300{
301 struct request_queue *q = rq->q;
302
303 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
304 if (rq_data_dir(rq) == WRITE)
305 bio->bi_rw |= (1 << BIO_RW);
306 blk_queue_bounce(q, &bio);
307
308 return blk_rq_append_bio(q, rq, bio);
309}
310
311static void scsi_bi_endio(struct bio *bio, int error)
312{
313 bio_put(bio);
314}
315
316/**
317 * scsi_req_map_sg - map a scatterlist into a request
318 * @rq: request to fill
319 * @sgl: scatterlist
320 * @nsegs: number of elements
321 * @bufflen: len of buffer
322 * @gfp: memory allocation flags
323 *
324 * scsi_req_map_sg maps a scatterlist into a request so that the
325 * request can be sent to the block layer. We do not trust the scatterlist
326 * sent to use, as some ULDs use that struct to only organize the pages.
327 */
328static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
329 int nsegs, unsigned bufflen, gfp_t gfp)
330{
331 struct request_queue *q = rq->q;
332 int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
333 unsigned int data_len = bufflen, len, bytes, off;
334 struct scatterlist *sg;
335 struct page *page;
336 struct bio *bio = NULL;
337 int i, err, nr_vecs = 0;
338
339 for_each_sg(sgl, sg, nsegs, i) {
340 page = sg_page(sg);
341 off = sg->offset;
342 len = sg->length;
343
344 while (len > 0 && data_len > 0) {
345 /*
346 * sg sends a scatterlist that is larger than
347 * the data_len it wants transferred for certain
348 * IO sizes
349 */
350 bytes = min_t(unsigned int, len, PAGE_SIZE - off);
351 bytes = min(bytes, data_len);
352
353 if (!bio) {
354 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
355 nr_pages -= nr_vecs;
356
357 bio = bio_alloc(gfp, nr_vecs);
358 if (!bio) {
359 err = -ENOMEM;
360 goto free_bios;
361 }
362 bio->bi_end_io = scsi_bi_endio;
363 }
364
365 if (bio_add_pc_page(q, bio, page, bytes, off) !=
366 bytes) {
367 bio_put(bio);
368 err = -EINVAL;
369 goto free_bios;
370 }
371
372 if (bio->bi_vcnt >= nr_vecs) {
373 err = scsi_merge_bio(rq, bio);
374 if (err) {
375 bio_endio(bio, 0);
376 goto free_bios;
377 }
378 bio = NULL;
379 }
380
381 page++;
382 len -= bytes;
383 data_len -=bytes;
384 off = 0;
385 }
386 }
387
388 rq->buffer = rq->data = NULL;
389 rq->data_len = bufflen;
390 return 0;
391
392free_bios:
393 while ((bio = rq->bio) != NULL) {
394 rq->bio = bio->bi_next;
395 /*
396 * call endio instead of bio_put incase it was bounced
397 */
398 bio_endio(bio, 0);
399 }
400
401 return err;
402}
403
404/**
405 * scsi_execute_async - insert request
406 * @sdev: scsi device
407 * @cmd: scsi command
408 * @cmd_len: length of scsi cdb
409 * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
410 * @buffer: data buffer (this can be a kernel buffer or scatterlist)
411 * @bufflen: len of buffer
412 * @use_sg: if buffer is a scatterlist this is the number of elements
413 * @timeout: request timeout in seconds
414 * @retries: number of times to retry request
415 * @privdata: data passed to done()
416 * @done: callback function when done
417 * @gfp: memory allocation flags
418 */
419int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
420 int cmd_len, int data_direction, void *buffer, unsigned bufflen,
421 int use_sg, int timeout, int retries, void *privdata,
422 void (*done)(void *, char *, int, int), gfp_t gfp)
423{
424 struct request *req;
425 struct scsi_io_context *sioc;
426 int err = 0;
427 int write = (data_direction == DMA_TO_DEVICE);
428
429 sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
430 if (!sioc)
431 return DRIVER_ERROR << 24;
432
433 req = blk_get_request(sdev->request_queue, write, gfp);
434 if (!req)
435 goto free_sense;
436 req->cmd_type = REQ_TYPE_BLOCK_PC;
437 req->cmd_flags |= REQ_QUIET;
438
439 if (use_sg)
440 err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
441 else if (bufflen)
442 err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);
443
444 if (err)
445 goto free_req;
446
447 req->cmd_len = cmd_len;
448 memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
449 memcpy(req->cmd, cmd, req->cmd_len);
450 req->sense = sioc->sense;
451 req->sense_len = 0;
452 req->timeout = timeout;
453 req->retries = retries;
454 req->end_io_data = sioc;
455
456 sioc->data = privdata;
457 sioc->done = done;
458
459 blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
460 return 0;
461
462free_req:
463 blk_put_request(req);
464free_sense:
465 kmem_cache_free(scsi_io_context_cache, sioc);
466 return DRIVER_ERROR << 24;
467}
468EXPORT_SYMBOL_GPL(scsi_execute_async);
469
470/* 280/*
471 * Function: scsi_init_cmd_errh() 281 * Function: scsi_init_cmd_errh()
472 * 282 *
@@ -1920,20 +1730,12 @@ int __init scsi_init_queue(void)
1920{ 1730{
1921 int i; 1731 int i;
1922 1732
1923 scsi_io_context_cache = kmem_cache_create("scsi_io_context",
1924 sizeof(struct scsi_io_context),
1925 0, 0, NULL);
1926 if (!scsi_io_context_cache) {
1927 printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
1928 return -ENOMEM;
1929 }
1930
1931 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", 1733 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1932 sizeof(struct scsi_data_buffer), 1734 sizeof(struct scsi_data_buffer),
1933 0, 0, NULL); 1735 0, 0, NULL);
1934 if (!scsi_sdb_cache) { 1736 if (!scsi_sdb_cache) {
1935 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n"); 1737 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
1936 goto cleanup_io_context; 1738 return -ENOMEM;
1937 } 1739 }
1938 1740
1939 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1741 for (i = 0; i < SG_MEMPOOL_NR; i++) {
@@ -1968,8 +1770,6 @@ cleanup_sdb:
1968 kmem_cache_destroy(sgp->slab); 1770 kmem_cache_destroy(sgp->slab);
1969 } 1771 }
1970 kmem_cache_destroy(scsi_sdb_cache); 1772 kmem_cache_destroy(scsi_sdb_cache);
1971cleanup_io_context:
1972 kmem_cache_destroy(scsi_io_context_cache);
1973 1773
1974 return -ENOMEM; 1774 return -ENOMEM;
1975} 1775}
@@ -1978,7 +1778,6 @@ void scsi_exit_queue(void)
1978{ 1778{
1979 int i; 1779 int i;
1980 1780
1981 kmem_cache_destroy(scsi_io_context_cache);
1982 kmem_cache_destroy(scsi_sdb_cache); 1781 kmem_cache_destroy(scsi_sdb_cache);
1983 1782
1984 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1783 for (i = 0; i < SG_MEMPOOL_NR; i++) {
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 8f4de20c9deb..a14d245a66b8 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -797,6 +797,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
797 case TYPE_ENCLOSURE: 797 case TYPE_ENCLOSURE:
798 case TYPE_COMM: 798 case TYPE_COMM:
799 case TYPE_RAID: 799 case TYPE_RAID:
800 case TYPE_OSD:
800 sdev->writeable = 1; 801 sdev->writeable = 1;
801 break; 802 break;
802 case TYPE_ROM: 803 case TYPE_ROM:
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index da63802cbf9d..fa4711d12744 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1043,7 +1043,6 @@ EXPORT_SYMBOL(scsi_register_interface);
1043/** 1043/**
1044 * scsi_sysfs_add_host - add scsi host to subsystem 1044 * scsi_sysfs_add_host - add scsi host to subsystem
1045 * @shost: scsi host struct to add to subsystem 1045 * @shost: scsi host struct to add to subsystem
1046 * @dev: parent struct device pointer
1047 **/ 1046 **/
1048int scsi_sysfs_add_host(struct Scsi_Host *shost) 1047int scsi_sysfs_add_host(struct Scsi_Host *shost)
1049{ 1048{
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 3ee4eb40abcf..a152f89ae51c 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -95,7 +95,7 @@ static struct {
95 { FC_PORTTYPE_NPORT, "NPort (fabric via point-to-point)" }, 95 { FC_PORTTYPE_NPORT, "NPort (fabric via point-to-point)" },
96 { FC_PORTTYPE_NLPORT, "NLPort (fabric via loop)" }, 96 { FC_PORTTYPE_NLPORT, "NLPort (fabric via loop)" },
97 { FC_PORTTYPE_LPORT, "LPort (private loop)" }, 97 { FC_PORTTYPE_LPORT, "LPort (private loop)" },
98 { FC_PORTTYPE_PTP, "Point-To-Point (direct nport connection" }, 98 { FC_PORTTYPE_PTP, "Point-To-Point (direct nport connection)" },
99 { FC_PORTTYPE_NPIV, "NPIV VPORT" }, 99 { FC_PORTTYPE_NPIV, "NPIV VPORT" },
100}; 100};
101fc_enum_name_search(port_type, fc_port_type, fc_port_type_names) 101fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 2adfab8c11c1..094795455293 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -246,30 +246,13 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
246 memset(ihost, 0, sizeof(*ihost)); 246 memset(ihost, 0, sizeof(*ihost));
247 atomic_set(&ihost->nr_scans, 0); 247 atomic_set(&ihost->nr_scans, 0);
248 mutex_init(&ihost->mutex); 248 mutex_init(&ihost->mutex);
249
250 snprintf(ihost->scan_workq_name, sizeof(ihost->scan_workq_name),
251 "iscsi_scan_%d", shost->host_no);
252 ihost->scan_workq = create_singlethread_workqueue(
253 ihost->scan_workq_name);
254 if (!ihost->scan_workq)
255 return -ENOMEM;
256 return 0;
257}
258
259static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
260 struct device *cdev)
261{
262 struct Scsi_Host *shost = dev_to_shost(dev);
263 struct iscsi_cls_host *ihost = shost->shost_data;
264
265 destroy_workqueue(ihost->scan_workq);
266 return 0; 249 return 0;
267} 250}
268 251
269static DECLARE_TRANSPORT_CLASS(iscsi_host_class, 252static DECLARE_TRANSPORT_CLASS(iscsi_host_class,
270 "iscsi_host", 253 "iscsi_host",
271 iscsi_setup_host, 254 iscsi_setup_host,
272 iscsi_remove_host, 255 NULL,
273 NULL); 256 NULL);
274 257
275static DECLARE_TRANSPORT_CLASS(iscsi_session_class, 258static DECLARE_TRANSPORT_CLASS(iscsi_session_class,
@@ -568,7 +551,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
568 * scanning from userspace). 551 * scanning from userspace).
569 */ 552 */
570 if (shost->hostt->scan_finished) { 553 if (shost->hostt->scan_finished) {
571 if (queue_work(ihost->scan_workq, &session->scan_work)) 554 if (scsi_queue_work(shost, &session->scan_work))
572 atomic_inc(&ihost->nr_scans); 555 atomic_inc(&ihost->nr_scans);
573 } 556 }
574} 557}
@@ -636,14 +619,6 @@ static void __iscsi_unbind_session(struct work_struct *work)
636 iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION); 619 iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
637} 620}
638 621
639static int iscsi_unbind_session(struct iscsi_cls_session *session)
640{
641 struct Scsi_Host *shost = iscsi_session_to_shost(session);
642 struct iscsi_cls_host *ihost = shost->shost_data;
643
644 return queue_work(ihost->scan_workq, &session->unbind_work);
645}
646
647struct iscsi_cls_session * 622struct iscsi_cls_session *
648iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport, 623iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
649 int dd_size) 624 int dd_size)
@@ -796,7 +771,6 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
796void iscsi_remove_session(struct iscsi_cls_session *session) 771void iscsi_remove_session(struct iscsi_cls_session *session)
797{ 772{
798 struct Scsi_Host *shost = iscsi_session_to_shost(session); 773 struct Scsi_Host *shost = iscsi_session_to_shost(session);
799 struct iscsi_cls_host *ihost = shost->shost_data;
800 unsigned long flags; 774 unsigned long flags;
801 int err; 775 int err;
802 776
@@ -821,7 +795,7 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
821 795
822 scsi_target_unblock(&session->dev); 796 scsi_target_unblock(&session->dev);
823 /* flush running scans then delete devices */ 797 /* flush running scans then delete devices */
824 flush_workqueue(ihost->scan_workq); 798 scsi_flush_work(shost);
825 __iscsi_unbind_session(&session->unbind_work); 799 __iscsi_unbind_session(&session->unbind_work);
826 800
827 /* hw iscsi may not have removed all connections from session */ 801 /* hw iscsi may not have removed all connections from session */
@@ -1215,14 +1189,15 @@ iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
1215{ 1189{
1216 struct iscsi_transport *transport = priv->iscsi_transport; 1190 struct iscsi_transport *transport = priv->iscsi_transport;
1217 struct iscsi_cls_session *session; 1191 struct iscsi_cls_session *session;
1218 uint32_t host_no; 1192 struct Scsi_Host *shost;
1219 1193
1220 session = transport->create_session(ep, cmds_max, queue_depth, 1194 session = transport->create_session(ep, cmds_max, queue_depth,
1221 initial_cmdsn, &host_no); 1195 initial_cmdsn);
1222 if (!session) 1196 if (!session)
1223 return -ENOMEM; 1197 return -ENOMEM;
1224 1198
1225 ev->r.c_session_ret.host_no = host_no; 1199 shost = iscsi_session_to_shost(session);
1200 ev->r.c_session_ret.host_no = shost->host_no;
1226 ev->r.c_session_ret.sid = session->sid; 1201 ev->r.c_session_ret.sid = session->sid;
1227 return 0; 1202 return 0;
1228} 1203}
@@ -1439,7 +1414,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1439 case ISCSI_UEVENT_UNBIND_SESSION: 1414 case ISCSI_UEVENT_UNBIND_SESSION:
1440 session = iscsi_session_lookup(ev->u.d_session.sid); 1415 session = iscsi_session_lookup(ev->u.d_session.sid);
1441 if (session) 1416 if (session)
1442 iscsi_unbind_session(session); 1417 scsi_queue_work(iscsi_session_to_shost(session),
1418 &session->unbind_work);
1443 else 1419 else
1444 err = -EINVAL; 1420 err = -EINVAL;
1445 break; 1421 break;
@@ -1801,8 +1777,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
1801 priv->daemon_pid = -1; 1777 priv->daemon_pid = -1;
1802 priv->iscsi_transport = tt; 1778 priv->iscsi_transport = tt;
1803 priv->t.user_scan = iscsi_user_scan; 1779 priv->t.user_scan = iscsi_user_scan;
1804 if (!(tt->caps & CAP_DATA_PATH_OFFLOAD)) 1780 priv->t.create_work_queue = 1;
1805 priv->t.create_work_queue = 1;
1806 1781
1807 priv->dev.class = &iscsi_transport_class; 1782 priv->dev.class = &iscsi_transport_class;
1808 dev_set_name(&priv->dev, "%s", tt->name); 1783 dev_set_name(&priv->dev, "%s", tt->name);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4970ae4a62d6..aeab5d9dff27 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1273,42 +1273,126 @@ disable:
1273 sdkp->capacity = 0; 1273 sdkp->capacity = 0;
1274} 1274}
1275 1275
1276/* 1276static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
1277 * read disk capacity 1277 struct scsi_sense_hdr *sshdr, int sense_valid,
1278 */ 1278 int the_result)
1279static void 1279{
1280sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer) 1280 sd_print_result(sdkp, the_result);
1281 if (driver_byte(the_result) & DRIVER_SENSE)
1282 sd_print_sense_hdr(sdkp, sshdr);
1283 else
1284 sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
1285
1286 /*
1287 * Set dirty bit for removable devices if not ready -
1288 * sometimes drives will not report this properly.
1289 */
1290 if (sdp->removable &&
1291 sense_valid && sshdr->sense_key == NOT_READY)
1292 sdp->changed = 1;
1293
1294 /*
1295 * We used to set media_present to 0 here to indicate no media
1296 * in the drive, but some drives fail read capacity even with
1297 * media present, so we can't do that.
1298 */
1299 sdkp->capacity = 0; /* unknown mapped to zero - as usual */
1300}
1301
1302#define RC16_LEN 32
1303#if RC16_LEN > SD_BUF_SIZE
1304#error RC16_LEN must not be more than SD_BUF_SIZE
1305#endif
1306
1307static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1308 unsigned char *buffer)
1281{ 1309{
1282 unsigned char cmd[16]; 1310 unsigned char cmd[16];
1283 int the_result, retries;
1284 int sector_size = 0;
1285 /* Force READ CAPACITY(16) when PROTECT=1 */
1286 int longrc = scsi_device_protection(sdkp->device) ? 1 : 0;
1287 struct scsi_sense_hdr sshdr; 1311 struct scsi_sense_hdr sshdr;
1288 int sense_valid = 0; 1312 int sense_valid = 0;
1289 struct scsi_device *sdp = sdkp->device; 1313 int the_result;
1314 int retries = 3;
1315 unsigned long long lba;
1316 unsigned sector_size;
1290 1317
1291repeat:
1292 retries = 3;
1293 do { 1318 do {
1294 if (longrc) { 1319 memset(cmd, 0, 16);
1295 memset((void *) cmd, 0, 16); 1320 cmd[0] = SERVICE_ACTION_IN;
1296 cmd[0] = SERVICE_ACTION_IN; 1321 cmd[1] = SAI_READ_CAPACITY_16;
1297 cmd[1] = SAI_READ_CAPACITY_16; 1322 cmd[13] = RC16_LEN;
1298 cmd[13] = 13; 1323 memset(buffer, 0, RC16_LEN);
1299 memset((void *) buffer, 0, 13); 1324
1300 } else { 1325 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
1301 cmd[0] = READ_CAPACITY; 1326 buffer, RC16_LEN, &sshdr,
1302 memset((void *) &cmd[1], 0, 9); 1327 SD_TIMEOUT, SD_MAX_RETRIES, NULL);
1303 memset((void *) buffer, 0, 8); 1328
1329 if (media_not_present(sdkp, &sshdr))
1330 return -ENODEV;
1331
1332 if (the_result) {
1333 sense_valid = scsi_sense_valid(&sshdr);
1334 if (sense_valid &&
1335 sshdr.sense_key == ILLEGAL_REQUEST &&
1336 (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
1337 sshdr.ascq == 0x00)
1338 /* Invalid Command Operation Code or
1339 * Invalid Field in CDB, just retry
1340 * silently with RC10 */
1341 return -EINVAL;
1304 } 1342 }
1305 1343 retries--;
1344
1345 } while (the_result && retries);
1346
1347 if (the_result) {
1348 sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY(16) failed\n");
1349 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
1350 return -EINVAL;
1351 }
1352
1353 sector_size = (buffer[8] << 24) | (buffer[9] << 16) |
1354 (buffer[10] << 8) | buffer[11];
1355 lba = (((u64)buffer[0] << 56) | ((u64)buffer[1] << 48) |
1356 ((u64)buffer[2] << 40) | ((u64)buffer[3] << 32) |
1357 ((u64)buffer[4] << 24) | ((u64)buffer[5] << 16) |
1358 ((u64)buffer[6] << 8) | (u64)buffer[7]);
1359
1360 sd_read_protection_type(sdkp, buffer);
1361
1362 if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
1363 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
1364 "kernel compiled with support for large block "
1365 "devices.\n");
1366 sdkp->capacity = 0;
1367 return -EOVERFLOW;
1368 }
1369
1370 sdkp->capacity = lba + 1;
1371 return sector_size;
1372}
1373
1374static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
1375 unsigned char *buffer)
1376{
1377 unsigned char cmd[16];
1378 struct scsi_sense_hdr sshdr;
1379 int sense_valid = 0;
1380 int the_result;
1381 int retries = 3;
1382 sector_t lba;
1383 unsigned sector_size;
1384
1385 do {
1386 cmd[0] = READ_CAPACITY;
1387 memset(&cmd[1], 0, 9);
1388 memset(buffer, 0, 8);
1389
1306 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE, 1390 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
1307 buffer, longrc ? 13 : 8, &sshdr, 1391 buffer, 8, &sshdr,
1308 SD_TIMEOUT, SD_MAX_RETRIES, NULL); 1392 SD_TIMEOUT, SD_MAX_RETRIES, NULL);
1309 1393
1310 if (media_not_present(sdkp, &sshdr)) 1394 if (media_not_present(sdkp, &sshdr))
1311 return; 1395 return -ENODEV;
1312 1396
1313 if (the_result) 1397 if (the_result)
1314 sense_valid = scsi_sense_valid(&sshdr); 1398 sense_valid = scsi_sense_valid(&sshdr);
@@ -1316,85 +1400,96 @@ repeat:
1316 1400
1317 } while (the_result && retries); 1401 } while (the_result && retries);
1318 1402
1319 if (the_result && !longrc) { 1403 if (the_result) {
1320 sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY failed\n"); 1404 sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY failed\n");
1321 sd_print_result(sdkp, the_result); 1405 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
1322 if (driver_byte(the_result) & DRIVER_SENSE) 1406 return -EINVAL;
1323 sd_print_sense_hdr(sdkp, &sshdr); 1407 }
1324 else
1325 sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
1326 1408
1327 /* Set dirty bit for removable devices if not ready - 1409 sector_size = (buffer[4] << 24) | (buffer[5] << 16) |
1328 * sometimes drives will not report this properly. */ 1410 (buffer[6] << 8) | buffer[7];
1329 if (sdp->removable && 1411 lba = (buffer[0] << 24) | (buffer[1] << 16) |
1330 sense_valid && sshdr.sense_key == NOT_READY) 1412 (buffer[2] << 8) | buffer[3];
1331 sdp->changed = 1;
1332 1413
1333 /* Either no media are present but the drive didn't tell us, 1414 if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
1334 or they are present but the read capacity command fails */ 1415 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
1335 /* sdkp->media_present = 0; -- not always correct */ 1416 "kernel compiled with support for large block "
1336 sdkp->capacity = 0; /* unknown mapped to zero - as usual */ 1417 "devices.\n");
1418 sdkp->capacity = 0;
1419 return -EOVERFLOW;
1420 }
1337 1421
1338 return; 1422 sdkp->capacity = lba + 1;
1339 } else if (the_result && longrc) { 1423 return sector_size;
1340 /* READ CAPACITY(16) has been failed */ 1424}
1341 sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY(16) failed\n");
1342 sd_print_result(sdkp, the_result);
1343 sd_printk(KERN_NOTICE, sdkp, "Use 0xffffffff as device size\n");
1344 1425
1345 sdkp->capacity = 1 + (sector_t) 0xffffffff; 1426static int sd_try_rc16_first(struct scsi_device *sdp)
1346 goto got_data; 1427{
1347 } 1428 if (sdp->scsi_level > SCSI_SPC_2)
1348 1429 return 1;
1349 if (!longrc) { 1430 if (scsi_device_protection(sdp))
1350 sector_size = (buffer[4] << 24) | 1431 return 1;
1351 (buffer[5] << 16) | (buffer[6] << 8) | buffer[7]; 1432 return 0;
1352 if (buffer[0] == 0xff && buffer[1] == 0xff && 1433}
1353 buffer[2] == 0xff && buffer[3] == 0xff) { 1434
1354 if(sizeof(sdkp->capacity) > 4) { 1435/*
1355 sd_printk(KERN_NOTICE, sdkp, "Very big device. " 1436 * read disk capacity
1356 "Trying to use READ CAPACITY(16).\n"); 1437 */
1357 longrc = 1; 1438static void
1358 goto repeat; 1439sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
1359 } 1440{
1360 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use " 1441 int sector_size;
1361 "a kernel compiled with support for large " 1442 struct scsi_device *sdp = sdkp->device;
1362 "block devices.\n"); 1443 sector_t old_capacity = sdkp->capacity;
1363 sdkp->capacity = 0; 1444
1445 if (sd_try_rc16_first(sdp)) {
1446 sector_size = read_capacity_16(sdkp, sdp, buffer);
1447 if (sector_size == -EOVERFLOW)
1364 goto got_data; 1448 goto got_data;
1365 } 1449 if (sector_size == -ENODEV)
1366 sdkp->capacity = 1 + (((sector_t)buffer[0] << 24) | 1450 return;
1367 (buffer[1] << 16) | 1451 if (sector_size < 0)
1368 (buffer[2] << 8) | 1452 sector_size = read_capacity_10(sdkp, sdp, buffer);
1369 buffer[3]); 1453 if (sector_size < 0)
1454 return;
1370 } else { 1455 } else {
1371 sdkp->capacity = 1 + (((u64)buffer[0] << 56) | 1456 sector_size = read_capacity_10(sdkp, sdp, buffer);
1372 ((u64)buffer[1] << 48) | 1457 if (sector_size == -EOVERFLOW)
1373 ((u64)buffer[2] << 40) | 1458 goto got_data;
1374 ((u64)buffer[3] << 32) | 1459 if (sector_size < 0)
1375 ((sector_t)buffer[4] << 24) | 1460 return;
1376 ((sector_t)buffer[5] << 16) | 1461 if ((sizeof(sdkp->capacity) > 4) &&
1377 ((sector_t)buffer[6] << 8) | 1462 (sdkp->capacity > 0xffffffffULL)) {
1378 (sector_t)buffer[7]); 1463 int old_sector_size = sector_size;
1379 1464 sd_printk(KERN_NOTICE, sdkp, "Very big device. "
1380 sector_size = (buffer[8] << 24) | 1465 "Trying to use READ CAPACITY(16).\n");
1381 (buffer[9] << 16) | (buffer[10] << 8) | buffer[11]; 1466 sector_size = read_capacity_16(sdkp, sdp, buffer);
1382 1467 if (sector_size < 0) {
1383 sd_read_protection_type(sdkp, buffer); 1468 sd_printk(KERN_NOTICE, sdkp,
1384 } 1469 "Using 0xffffffff as device size\n");
1385 1470 sdkp->capacity = 1 + (sector_t) 0xffffffff;
1386 /* Some devices return the total number of sectors, not the 1471 sector_size = old_sector_size;
1387 * highest sector number. Make the necessary adjustment. */ 1472 goto got_data;
1388 if (sdp->fix_capacity) { 1473 }
1389 --sdkp->capacity; 1474 }
1475 }
1390 1476
1391 /* Some devices have version which report the correct sizes 1477 /* Some devices are known to return the total number of blocks,
1392 * and others which do not. We guess size according to a heuristic 1478 * not the highest block number. Some devices have versions
1393 * and err on the side of lowering the capacity. */ 1479 * which do this and others which do not. Some devices we might
1394 } else { 1480 * suspect of doing this but we don't know for certain.
1395 if (sdp->guess_capacity) 1481 *
1396 if (sdkp->capacity & 0x01) /* odd sizes are odd */ 1482 * If we know the reported capacity is wrong, decrement it. If
1397 --sdkp->capacity; 1483 * we can only guess, then assume the number of blocks is even
1484 * (usually true but not always) and err on the side of lowering
1485 * the capacity.
1486 */
1487 if (sdp->fix_capacity ||
1488 (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
1489 sd_printk(KERN_INFO, sdkp, "Adjusting the sector count "
1490 "from its reported value: %llu\n",
1491 (unsigned long long) sdkp->capacity);
1492 --sdkp->capacity;
1398 } 1493 }
1399 1494
1400got_data: 1495got_data:
@@ -1437,10 +1532,11 @@ got_data:
1437 string_get_size(sz, STRING_UNITS_10, cap_str_10, 1532 string_get_size(sz, STRING_UNITS_10, cap_str_10,
1438 sizeof(cap_str_10)); 1533 sizeof(cap_str_10));
1439 1534
1440 sd_printk(KERN_NOTICE, sdkp, 1535 if (sdkp->first_scan || old_capacity != sdkp->capacity)
1441 "%llu %d-byte hardware sectors: (%s/%s)\n", 1536 sd_printk(KERN_NOTICE, sdkp,
1442 (unsigned long long)sdkp->capacity, 1537 "%llu %d-byte hardware sectors: (%s/%s)\n",
1443 sector_size, cap_str_10, cap_str_2); 1538 (unsigned long long)sdkp->capacity,
1539 sector_size, cap_str_10, cap_str_2);
1444 } 1540 }
1445 1541
1446 /* Rescale capacity to 512-byte units */ 1542 /* Rescale capacity to 512-byte units */
@@ -1477,6 +1573,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
1477 int res; 1573 int res;
1478 struct scsi_device *sdp = sdkp->device; 1574 struct scsi_device *sdp = sdkp->device;
1479 struct scsi_mode_data data; 1575 struct scsi_mode_data data;
1576 int old_wp = sdkp->write_prot;
1480 1577
1481 set_disk_ro(sdkp->disk, 0); 1578 set_disk_ro(sdkp->disk, 0);
1482 if (sdp->skip_ms_page_3f) { 1579 if (sdp->skip_ms_page_3f) {
@@ -1517,11 +1614,13 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
1517 } else { 1614 } else {
1518 sdkp->write_prot = ((data.device_specific & 0x80) != 0); 1615 sdkp->write_prot = ((data.device_specific & 0x80) != 0);
1519 set_disk_ro(sdkp->disk, sdkp->write_prot); 1616 set_disk_ro(sdkp->disk, sdkp->write_prot);
1520 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", 1617 if (sdkp->first_scan || old_wp != sdkp->write_prot) {
1521 sdkp->write_prot ? "on" : "off"); 1618 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
1522 sd_printk(KERN_DEBUG, sdkp, 1619 sdkp->write_prot ? "on" : "off");
1523 "Mode Sense: %02x %02x %02x %02x\n", 1620 sd_printk(KERN_DEBUG, sdkp,
1524 buffer[0], buffer[1], buffer[2], buffer[3]); 1621 "Mode Sense: %02x %02x %02x %02x\n",
1622 buffer[0], buffer[1], buffer[2], buffer[3]);
1623 }
1525 } 1624 }
1526} 1625}
1527 1626
@@ -1539,6 +1638,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
1539 int modepage; 1638 int modepage;
1540 struct scsi_mode_data data; 1639 struct scsi_mode_data data;
1541 struct scsi_sense_hdr sshdr; 1640 struct scsi_sense_hdr sshdr;
1641 int old_wce = sdkp->WCE;
1642 int old_rcd = sdkp->RCD;
1643 int old_dpofua = sdkp->DPOFUA;
1542 1644
1543 if (sdp->skip_ms_page_8) 1645 if (sdp->skip_ms_page_8)
1544 goto defaults; 1646 goto defaults;
@@ -1610,12 +1712,14 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
1610 sdkp->DPOFUA = 0; 1712 sdkp->DPOFUA = 0;
1611 } 1713 }
1612 1714
1613 sd_printk(KERN_NOTICE, sdkp, 1715 if (sdkp->first_scan || old_wce != sdkp->WCE ||
1614 "Write cache: %s, read cache: %s, %s\n", 1716 old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
1615 sdkp->WCE ? "enabled" : "disabled", 1717 sd_printk(KERN_NOTICE, sdkp,
1616 sdkp->RCD ? "disabled" : "enabled", 1718 "Write cache: %s, read cache: %s, %s\n",
1617 sdkp->DPOFUA ? "supports DPO and FUA" 1719 sdkp->WCE ? "enabled" : "disabled",
1618 : "doesn't support DPO or FUA"); 1720 sdkp->RCD ? "disabled" : "enabled",
1721 sdkp->DPOFUA ? "supports DPO and FUA"
1722 : "doesn't support DPO or FUA");
1619 1723
1620 return; 1724 return;
1621 } 1725 }
@@ -1711,15 +1815,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
1711 goto out; 1815 goto out;
1712 } 1816 }
1713 1817
1714 /* defaults, until the device tells us otherwise */
1715 sdp->sector_size = 512;
1716 sdkp->capacity = 0;
1717 sdkp->media_present = 1;
1718 sdkp->write_prot = 0;
1719 sdkp->WCE = 0;
1720 sdkp->RCD = 0;
1721 sdkp->ATO = 0;
1722
1723 sd_spinup_disk(sdkp); 1818 sd_spinup_disk(sdkp);
1724 1819
1725 /* 1820 /*
@@ -1733,6 +1828,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
1733 sd_read_app_tag_own(sdkp, buffer); 1828 sd_read_app_tag_own(sdkp, buffer);
1734 } 1829 }
1735 1830
1831 sdkp->first_scan = 0;
1832
1736 /* 1833 /*
1737 * We now have all cache related info, determine how we deal 1834 * We now have all cache related info, determine how we deal
1738 * with ordered requests. Note that as the current SCSI 1835 * with ordered requests. Note that as the current SCSI
@@ -1843,6 +1940,16 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1843 gd->private_data = &sdkp->driver; 1940 gd->private_data = &sdkp->driver;
1844 gd->queue = sdkp->device->request_queue; 1941 gd->queue = sdkp->device->request_queue;
1845 1942
1943 /* defaults, until the device tells us otherwise */
1944 sdp->sector_size = 512;
1945 sdkp->capacity = 0;
1946 sdkp->media_present = 1;
1947 sdkp->write_prot = 0;
1948 sdkp->WCE = 0;
1949 sdkp->RCD = 0;
1950 sdkp->ATO = 0;
1951 sdkp->first_scan = 1;
1952
1846 sd_revalidate_disk(gd); 1953 sd_revalidate_disk(gd);
1847 1954
1848 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); 1955 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 75638e7d3f66..708778cf5f06 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -53,6 +53,7 @@ struct scsi_disk {
53 unsigned WCE : 1; /* state of disk WCE bit */ 53 unsigned WCE : 1; /* state of disk WCE bit */
54 unsigned RCD : 1; /* state of disk RCD bit, unused */ 54 unsigned RCD : 1; /* state of disk RCD bit, unused */
55 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ 55 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
56 unsigned first_scan : 1;
56}; 57};
57#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) 58#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
58 59
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index e946e05db7f7..c9146d751cbf 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -345,44 +345,21 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
345 return 0; 345 return 0;
346} 346}
347 347
348#define VPD_INQUIRY_SIZE 36
349
350static void ses_match_to_enclosure(struct enclosure_device *edev, 348static void ses_match_to_enclosure(struct enclosure_device *edev,
351 struct scsi_device *sdev) 349 struct scsi_device *sdev)
352{ 350{
353 unsigned char *buf = kmalloc(VPD_INQUIRY_SIZE, GFP_KERNEL); 351 unsigned char *buf;
354 unsigned char *desc; 352 unsigned char *desc;
355 u16 vpd_len; 353 unsigned int vpd_len;
356 struct efd efd = { 354 struct efd efd = {
357 .addr = 0, 355 .addr = 0,
358 }; 356 };
359 unsigned char cmd[] = {
360 INQUIRY,
361 1,
362 0x83,
363 VPD_INQUIRY_SIZE >> 8,
364 VPD_INQUIRY_SIZE & 0xff,
365 0
366 };
367 357
358 buf = scsi_get_vpd_page(sdev, 0x83);
368 if (!buf) 359 if (!buf)
369 return; 360 return;
370 361
371 if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, 362 vpd_len = ((buf[2] << 8) | buf[3]) + 4;
372 VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES,
373 NULL))
374 goto free;
375
376 vpd_len = (buf[2] << 8) + buf[3];
377 kfree(buf);
378 buf = kmalloc(vpd_len, GFP_KERNEL);
379 if (!buf)
380 return;
381 cmd[3] = vpd_len >> 8;
382 cmd[4] = vpd_len & 0xff;
383 if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
384 vpd_len, NULL, SES_TIMEOUT, SES_RETRIES, NULL))
385 goto free;
386 363
387 desc = buf + 4; 364 desc = buf + 4;
388 while (desc < buf + vpd_len) { 365 while (desc < buf + vpd_len) {
@@ -393,7 +370,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
393 u8 type = desc[1] & 0x0f; 370 u8 type = desc[1] & 0x0f;
394 u8 len = desc[3]; 371 u8 len = desc[3];
395 372
396 if (piv && code_set == 1 && assoc == 1 && code_set == 1 373 if (piv && code_set == 1 && assoc == 1
397 && proto == SCSI_PROTOCOL_SAS && type == 3 && len == 8) 374 && proto == SCSI_PROTOCOL_SAS && type == 3 && len == 8)
398 efd.addr = (u64)desc[4] << 56 | 375 efd.addr = (u64)desc[4] << 56 |
399 (u64)desc[5] << 48 | 376 (u64)desc[5] << 48 |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index b4ef2f84ea32..ffc87851f2e8 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -98,7 +98,6 @@ static int scatter_elem_sz = SG_SCATTER_SZ;
98static int scatter_elem_sz_prev = SG_SCATTER_SZ; 98static int scatter_elem_sz_prev = SG_SCATTER_SZ;
99 99
100#define SG_SECTOR_SZ 512 100#define SG_SECTOR_SZ 512
101#define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
102 101
103static int sg_add(struct device *, struct class_interface *); 102static int sg_add(struct device *, struct class_interface *);
104static void sg_remove(struct device *, struct class_interface *); 103static void sg_remove(struct device *, struct class_interface *);
@@ -137,10 +136,11 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
137 volatile char done; /* 0->before bh, 1->before read, 2->read */ 136 volatile char done; /* 0->before bh, 1->before read, 2->read */
138 struct request *rq; 137 struct request *rq;
139 struct bio *bio; 138 struct bio *bio;
139 struct execute_work ew;
140} Sg_request; 140} Sg_request;
141 141
142typedef struct sg_fd { /* holds the state of a file descriptor */ 142typedef struct sg_fd { /* holds the state of a file descriptor */
143 struct sg_fd *nextfp; /* NULL when last opened fd on this device */ 143 struct list_head sfd_siblings;
144 struct sg_device *parentdp; /* owning device */ 144 struct sg_device *parentdp; /* owning device */
145 wait_queue_head_t read_wait; /* queue read until command done */ 145 wait_queue_head_t read_wait; /* queue read until command done */
146 rwlock_t rq_list_lock; /* protect access to list in req_arr */ 146 rwlock_t rq_list_lock; /* protect access to list in req_arr */
@@ -158,6 +158,8 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
158 char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */ 158 char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
159 char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ 159 char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
160 char mmap_called; /* 0 -> mmap() never called on this fd */ 160 char mmap_called; /* 0 -> mmap() never called on this fd */
161 struct kref f_ref;
162 struct execute_work ew;
161} Sg_fd; 163} Sg_fd;
162 164
163typedef struct sg_device { /* holds the state of each scsi generic device */ 165typedef struct sg_device { /* holds the state of each scsi generic device */
@@ -165,27 +167,25 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
165 wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */ 167 wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
166 int sg_tablesize; /* adapter's max scatter-gather table size */ 168 int sg_tablesize; /* adapter's max scatter-gather table size */
167 u32 index; /* device index number */ 169 u32 index; /* device index number */
168 Sg_fd *headfp; /* first open fd belonging to this device */ 170 struct list_head sfds;
169 volatile char detached; /* 0->attached, 1->detached pending removal */ 171 volatile char detached; /* 0->attached, 1->detached pending removal */
170 volatile char exclude; /* opened for exclusive access */ 172 volatile char exclude; /* opened for exclusive access */
171 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ 173 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
172 struct gendisk *disk; 174 struct gendisk *disk;
173 struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */ 175 struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
176 struct kref d_ref;
174} Sg_device; 177} Sg_device;
175 178
176static int sg_fasync(int fd, struct file *filp, int mode);
177/* tasklet or soft irq callback */ 179/* tasklet or soft irq callback */
178static void sg_rq_end_io(struct request *rq, int uptodate); 180static void sg_rq_end_io(struct request *rq, int uptodate);
179static int sg_start_req(Sg_request *srp, unsigned char *cmd); 181static int sg_start_req(Sg_request *srp, unsigned char *cmd);
180static void sg_finish_rem_req(Sg_request * srp); 182static void sg_finish_rem_req(Sg_request * srp);
181static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); 183static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
182static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
183 int tablesize);
184static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, 184static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
185 Sg_request * srp); 185 Sg_request * srp);
186static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, 186static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
187 const char __user *buf, size_t count, int blocking, 187 const char __user *buf, size_t count, int blocking,
188 int read_only, Sg_request **o_srp); 188 int read_only, int sg_io_owned, Sg_request **o_srp);
189static int sg_common_write(Sg_fd * sfp, Sg_request * srp, 189static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
190 unsigned char *cmnd, int timeout, int blocking); 190 unsigned char *cmnd, int timeout, int blocking);
191static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); 191static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
@@ -194,16 +194,13 @@ static void sg_build_reserve(Sg_fd * sfp, int req_size);
194static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); 194static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
195static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); 195static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
196static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev); 196static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
197static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); 197static void sg_remove_sfp(struct kref *);
198static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
199static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); 198static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
200static Sg_request *sg_add_request(Sg_fd * sfp); 199static Sg_request *sg_add_request(Sg_fd * sfp);
201static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); 200static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
202static int sg_res_in_use(Sg_fd * sfp); 201static int sg_res_in_use(Sg_fd * sfp);
203static Sg_device *sg_get_dev(int dev); 202static Sg_device *sg_get_dev(int dev);
204#ifdef CONFIG_SCSI_PROC_FS 203static void sg_put_dev(Sg_device *sdp);
205static int sg_last_dev(void);
206#endif
207 204
208#define SZ_SG_HEADER sizeof(struct sg_header) 205#define SZ_SG_HEADER sizeof(struct sg_header)
209#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t) 206#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
@@ -237,22 +234,17 @@ sg_open(struct inode *inode, struct file *filp)
237 nonseekable_open(inode, filp); 234 nonseekable_open(inode, filp);
238 SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags)); 235 SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
239 sdp = sg_get_dev(dev); 236 sdp = sg_get_dev(dev);
240 if ((!sdp) || (!sdp->device)) { 237 if (IS_ERR(sdp)) {
241 unlock_kernel(); 238 retval = PTR_ERR(sdp);
242 return -ENXIO; 239 sdp = NULL;
243 } 240 goto sg_put;
244 if (sdp->detached) {
245 unlock_kernel();
246 return -ENODEV;
247 } 241 }
248 242
249 /* This driver's module count bumped by fops_get in <linux/fs.h> */ 243 /* This driver's module count bumped by fops_get in <linux/fs.h> */
250 /* Prevent the device driver from vanishing while we sleep */ 244 /* Prevent the device driver from vanishing while we sleep */
251 retval = scsi_device_get(sdp->device); 245 retval = scsi_device_get(sdp->device);
252 if (retval) { 246 if (retval)
253 unlock_kernel(); 247 goto sg_put;
254 return retval;
255 }
256 248
257 if (!((flags & O_NONBLOCK) || 249 if (!((flags & O_NONBLOCK) ||
258 scsi_block_when_processing_errors(sdp->device))) { 250 scsi_block_when_processing_errors(sdp->device))) {
@@ -266,13 +258,13 @@ sg_open(struct inode *inode, struct file *filp)
266 retval = -EPERM; /* Can't lock it with read only access */ 258 retval = -EPERM; /* Can't lock it with read only access */
267 goto error_out; 259 goto error_out;
268 } 260 }
269 if (sdp->headfp && (flags & O_NONBLOCK)) { 261 if (!list_empty(&sdp->sfds) && (flags & O_NONBLOCK)) {
270 retval = -EBUSY; 262 retval = -EBUSY;
271 goto error_out; 263 goto error_out;
272 } 264 }
273 res = 0; 265 res = 0;
274 __wait_event_interruptible(sdp->o_excl_wait, 266 __wait_event_interruptible(sdp->o_excl_wait,
275 ((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res); 267 ((!list_empty(&sdp->sfds) || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
276 if (res) { 268 if (res) {
277 retval = res; /* -ERESTARTSYS because signal hit process */ 269 retval = res; /* -ERESTARTSYS because signal hit process */
278 goto error_out; 270 goto error_out;
@@ -294,7 +286,7 @@ sg_open(struct inode *inode, struct file *filp)
294 retval = -ENODEV; 286 retval = -ENODEV;
295 goto error_out; 287 goto error_out;
296 } 288 }
297 if (!sdp->headfp) { /* no existing opens on this device */ 289 if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
298 sdp->sgdebug = 0; 290 sdp->sgdebug = 0;
299 q = sdp->device->request_queue; 291 q = sdp->device->request_queue;
300 sdp->sg_tablesize = min(q->max_hw_segments, 292 sdp->sg_tablesize = min(q->max_hw_segments,
@@ -303,16 +295,20 @@ sg_open(struct inode *inode, struct file *filp)
303 if ((sfp = sg_add_sfp(sdp, dev))) 295 if ((sfp = sg_add_sfp(sdp, dev)))
304 filp->private_data = sfp; 296 filp->private_data = sfp;
305 else { 297 else {
306 if (flags & O_EXCL) 298 if (flags & O_EXCL) {
307 sdp->exclude = 0; /* undo if error */ 299 sdp->exclude = 0; /* undo if error */
300 wake_up_interruptible(&sdp->o_excl_wait);
301 }
308 retval = -ENOMEM; 302 retval = -ENOMEM;
309 goto error_out; 303 goto error_out;
310 } 304 }
311 unlock_kernel(); 305 retval = 0;
312 return 0; 306error_out:
313 307 if (retval)
314 error_out: 308 scsi_device_put(sdp->device);
315 scsi_device_put(sdp->device); 309sg_put:
310 if (sdp)
311 sg_put_dev(sdp);
316 unlock_kernel(); 312 unlock_kernel();
317 return retval; 313 return retval;
318} 314}
@@ -327,13 +323,13 @@ sg_release(struct inode *inode, struct file *filp)
327 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 323 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
328 return -ENXIO; 324 return -ENXIO;
329 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); 325 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
330 if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */ 326
331 if (!sdp->detached) { 327 sfp->closed = 1;
332 scsi_device_put(sdp->device); 328
333 } 329 sdp->exclude = 0;
334 sdp->exclude = 0; 330 wake_up_interruptible(&sdp->o_excl_wait);
335 wake_up_interruptible(&sdp->o_excl_wait); 331
336 } 332 kref_put(&sfp->f_ref, sg_remove_sfp);
337 return 0; 333 return 0;
338} 334}
339 335
@@ -557,7 +553,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
557 return -EFAULT; 553 return -EFAULT;
558 blocking = !(filp->f_flags & O_NONBLOCK); 554 blocking = !(filp->f_flags & O_NONBLOCK);
559 if (old_hdr.reply_len < 0) 555 if (old_hdr.reply_len < 0)
560 return sg_new_write(sfp, filp, buf, count, blocking, 0, NULL); 556 return sg_new_write(sfp, filp, buf, count,
557 blocking, 0, 0, NULL);
561 if (count < (SZ_SG_HEADER + 6)) 558 if (count < (SZ_SG_HEADER + 6))
562 return -EIO; /* The minimum scsi command length is 6 bytes. */ 559 return -EIO; /* The minimum scsi command length is 6 bytes. */
563 560
@@ -638,7 +635,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
638 635
639static ssize_t 636static ssize_t
640sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, 637sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
641 size_t count, int blocking, int read_only, 638 size_t count, int blocking, int read_only, int sg_io_owned,
642 Sg_request **o_srp) 639 Sg_request **o_srp)
643{ 640{
644 int k; 641 int k;
@@ -658,6 +655,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
658 SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n")); 655 SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
659 return -EDOM; 656 return -EDOM;
660 } 657 }
658 srp->sg_io_owned = sg_io_owned;
661 hp = &srp->header; 659 hp = &srp->header;
662 if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) { 660 if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
663 sg_remove_request(sfp, srp); 661 sg_remove_request(sfp, srp);
@@ -755,24 +753,13 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
755 hp->duration = jiffies_to_msecs(jiffies); 753 hp->duration = jiffies_to_msecs(jiffies);
756 754
757 srp->rq->timeout = timeout; 755 srp->rq->timeout = timeout;
756 kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
758 blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk, 757 blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
759 srp->rq, 1, sg_rq_end_io); 758 srp->rq, 1, sg_rq_end_io);
760 return 0; 759 return 0;
761} 760}
762 761
763static int 762static int
764sg_srp_done(Sg_request *srp, Sg_fd *sfp)
765{
766 unsigned long iflags;
767 int done;
768
769 read_lock_irqsave(&sfp->rq_list_lock, iflags);
770 done = srp->done;
771 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
772 return done;
773}
774
775static int
776sg_ioctl(struct inode *inode, struct file *filp, 763sg_ioctl(struct inode *inode, struct file *filp,
777 unsigned int cmd_in, unsigned long arg) 764 unsigned int cmd_in, unsigned long arg)
778{ 765{
@@ -804,27 +791,26 @@ sg_ioctl(struct inode *inode, struct file *filp,
804 return -EFAULT; 791 return -EFAULT;
805 result = 792 result =
806 sg_new_write(sfp, filp, p, SZ_SG_IO_HDR, 793 sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
807 blocking, read_only, &srp); 794 blocking, read_only, 1, &srp);
808 if (result < 0) 795 if (result < 0)
809 return result; 796 return result;
810 srp->sg_io_owned = 1;
811 while (1) { 797 while (1) {
812 result = 0; /* following macro to beat race condition */ 798 result = 0; /* following macro to beat race condition */
813 __wait_event_interruptible(sfp->read_wait, 799 __wait_event_interruptible(sfp->read_wait,
814 (sdp->detached || sfp->closed || sg_srp_done(srp, sfp)), 800 (srp->done || sdp->detached),
815 result); 801 result);
816 if (sdp->detached) 802 if (sdp->detached)
817 return -ENODEV; 803 return -ENODEV;
818 if (sfp->closed) 804 write_lock_irq(&sfp->rq_list_lock);
819 return 0; /* request packet dropped already */ 805 if (srp->done) {
820 if (0 == result) 806 srp->done = 2;
807 write_unlock_irq(&sfp->rq_list_lock);
821 break; 808 break;
809 }
822 srp->orphan = 1; 810 srp->orphan = 1;
811 write_unlock_irq(&sfp->rq_list_lock);
823 return result; /* -ERESTARTSYS because signal hit process */ 812 return result; /* -ERESTARTSYS because signal hit process */
824 } 813 }
825 write_lock_irqsave(&sfp->rq_list_lock, iflags);
826 srp->done = 2;
827 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
828 result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp); 814 result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
829 return (result < 0) ? result : 0; 815 return (result < 0) ? result : 0;
830 } 816 }
@@ -1238,6 +1224,15 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1238 return 0; 1224 return 0;
1239} 1225}
1240 1226
1227static void sg_rq_end_io_usercontext(struct work_struct *work)
1228{
1229 struct sg_request *srp = container_of(work, struct sg_request, ew.work);
1230 struct sg_fd *sfp = srp->parentfp;
1231
1232 sg_finish_rem_req(srp);
1233 kref_put(&sfp->f_ref, sg_remove_sfp);
1234}
1235
1241/* 1236/*
1242 * This function is a "bottom half" handler that is called by the mid 1237 * This function is a "bottom half" handler that is called by the mid
1243 * level when a command is completed (or has failed). 1238 * level when a command is completed (or has failed).
@@ -1245,24 +1240,23 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1245static void sg_rq_end_io(struct request *rq, int uptodate) 1240static void sg_rq_end_io(struct request *rq, int uptodate)
1246{ 1241{
1247 struct sg_request *srp = rq->end_io_data; 1242 struct sg_request *srp = rq->end_io_data;
1248 Sg_device *sdp = NULL; 1243 Sg_device *sdp;
1249 Sg_fd *sfp; 1244 Sg_fd *sfp;
1250 unsigned long iflags; 1245 unsigned long iflags;
1251 unsigned int ms; 1246 unsigned int ms;
1252 char *sense; 1247 char *sense;
1253 int result, resid; 1248 int result, resid, done = 1;
1254 1249
1255 if (NULL == srp) { 1250 if (WARN_ON(srp->done != 0))
1256 printk(KERN_ERR "sg_cmd_done: NULL request\n");
1257 return; 1251 return;
1258 } 1252
1259 sfp = srp->parentfp; 1253 sfp = srp->parentfp;
1260 if (sfp) 1254 if (WARN_ON(sfp == NULL))
1261 sdp = sfp->parentdp;
1262 if ((NULL == sdp) || sdp->detached) {
1263 printk(KERN_INFO "sg_cmd_done: device detached\n");
1264 return; 1255 return;
1265 } 1256
1257 sdp = sfp->parentdp;
1258 if (unlikely(sdp->detached))
1259 printk(KERN_INFO "sg_rq_end_io: device detached\n");
1266 1260
1267 sense = rq->sense; 1261 sense = rq->sense;
1268 result = rq->errors; 1262 result = rq->errors;
@@ -1301,33 +1295,25 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
1301 } 1295 }
1302 /* Rely on write phase to clean out srp status values, so no "else" */ 1296 /* Rely on write phase to clean out srp status values, so no "else" */
1303 1297
1304 if (sfp->closed) { /* whoops this fd already released, cleanup */ 1298 write_lock_irqsave(&sfp->rq_list_lock, iflags);
1305 SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n")); 1299 if (unlikely(srp->orphan)) {
1306 sg_finish_rem_req(srp);
1307 srp = NULL;
1308 if (NULL == sfp->headrp) {
1309 SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, final cleanup\n"));
1310 if (0 == sg_remove_sfp(sdp, sfp)) { /* device still present */
1311 scsi_device_put(sdp->device);
1312 }
1313 sfp = NULL;
1314 }
1315 } else if (srp && srp->orphan) {
1316 if (sfp->keep_orphan) 1300 if (sfp->keep_orphan)
1317 srp->sg_io_owned = 0; 1301 srp->sg_io_owned = 0;
1318 else { 1302 else
1319 sg_finish_rem_req(srp); 1303 done = 0;
1320 srp = NULL;
1321 }
1322 } 1304 }
1323 if (sfp && srp) { 1305 srp->done = done;
1324 /* Now wake up any sg_read() that is waiting for this packet. */ 1306 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1325 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN); 1307
1326 write_lock_irqsave(&sfp->rq_list_lock, iflags); 1308 if (likely(done)) {
1327 srp->done = 1; 1309 /* Now wake up any sg_read() that is waiting for this
1310 * packet.
1311 */
1328 wake_up_interruptible(&sfp->read_wait); 1312 wake_up_interruptible(&sfp->read_wait);
1329 write_unlock_irqrestore(&sfp->rq_list_lock, iflags); 1313 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
1330 } 1314 kref_put(&sfp->f_ref, sg_remove_sfp);
1315 } else
1316 execute_in_process_context(sg_rq_end_io_usercontext, &srp->ew);
1331} 1317}
1332 1318
1333static struct file_operations sg_fops = { 1319static struct file_operations sg_fops = {
@@ -1362,17 +1348,18 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1362 printk(KERN_WARNING "kmalloc Sg_device failure\n"); 1348 printk(KERN_WARNING "kmalloc Sg_device failure\n");
1363 return ERR_PTR(-ENOMEM); 1349 return ERR_PTR(-ENOMEM);
1364 } 1350 }
1365 error = -ENOMEM; 1351
1366 if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) { 1352 if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) {
1367 printk(KERN_WARNING "idr expansion Sg_device failure\n"); 1353 printk(KERN_WARNING "idr expansion Sg_device failure\n");
1354 error = -ENOMEM;
1368 goto out; 1355 goto out;
1369 } 1356 }
1370 1357
1371 write_lock_irqsave(&sg_index_lock, iflags); 1358 write_lock_irqsave(&sg_index_lock, iflags);
1372 error = idr_get_new(&sg_index_idr, sdp, &k);
1373 write_unlock_irqrestore(&sg_index_lock, iflags);
1374 1359
1360 error = idr_get_new(&sg_index_idr, sdp, &k);
1375 if (error) { 1361 if (error) {
1362 write_unlock_irqrestore(&sg_index_lock, iflags);
1376 printk(KERN_WARNING "idr allocation Sg_device failure: %d\n", 1363 printk(KERN_WARNING "idr allocation Sg_device failure: %d\n",
1377 error); 1364 error);
1378 goto out; 1365 goto out;
@@ -1386,9 +1373,13 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1386 disk->first_minor = k; 1373 disk->first_minor = k;
1387 sdp->disk = disk; 1374 sdp->disk = disk;
1388 sdp->device = scsidp; 1375 sdp->device = scsidp;
1376 INIT_LIST_HEAD(&sdp->sfds);
1389 init_waitqueue_head(&sdp->o_excl_wait); 1377 init_waitqueue_head(&sdp->o_excl_wait);
1390 sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments); 1378 sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
1391 sdp->index = k; 1379 sdp->index = k;
1380 kref_init(&sdp->d_ref);
1381
1382 write_unlock_irqrestore(&sg_index_lock, iflags);
1392 1383
1393 error = 0; 1384 error = 0;
1394 out: 1385 out:
@@ -1399,6 +1390,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1399 return sdp; 1390 return sdp;
1400 1391
1401 overflow: 1392 overflow:
1393 idr_remove(&sg_index_idr, k);
1394 write_unlock_irqrestore(&sg_index_lock, iflags);
1402 sdev_printk(KERN_WARNING, scsidp, 1395 sdev_printk(KERN_WARNING, scsidp,
1403 "Unable to attach sg device type=%d, minor " 1396 "Unable to attach sg device type=%d, minor "
1404 "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1); 1397 "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
@@ -1486,49 +1479,46 @@ out:
1486 return error; 1479 return error;
1487} 1480}
1488 1481
1489static void 1482static void sg_device_destroy(struct kref *kref)
1490sg_remove(struct device *cl_dev, struct class_interface *cl_intf) 1483{
1484 struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
1485 unsigned long flags;
1486
1487 /* CAUTION! Note that the device can still be found via idr_find()
1488 * even though the refcount is 0. Therefore, do idr_remove() BEFORE
1489 * any other cleanup.
1490 */
1491
1492 write_lock_irqsave(&sg_index_lock, flags);
1493 idr_remove(&sg_index_idr, sdp->index);
1494 write_unlock_irqrestore(&sg_index_lock, flags);
1495
1496 SCSI_LOG_TIMEOUT(3,
1497 printk("sg_device_destroy: %s\n",
1498 sdp->disk->disk_name));
1499
1500 put_disk(sdp->disk);
1501 kfree(sdp);
1502}
1503
1504static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
1491{ 1505{
1492 struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); 1506 struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
1493 Sg_device *sdp = dev_get_drvdata(cl_dev); 1507 Sg_device *sdp = dev_get_drvdata(cl_dev);
1494 unsigned long iflags; 1508 unsigned long iflags;
1495 Sg_fd *sfp; 1509 Sg_fd *sfp;
1496 Sg_fd *tsfp;
1497 Sg_request *srp;
1498 Sg_request *tsrp;
1499 int delay;
1500 1510
1501 if (!sdp) 1511 if (!sdp || sdp->detached)
1502 return; 1512 return;
1503 1513
1504 delay = 0; 1514 SCSI_LOG_TIMEOUT(3, printk("sg_remove: %s\n", sdp->disk->disk_name));
1515
1516 /* Need a write lock to set sdp->detached. */
1505 write_lock_irqsave(&sg_index_lock, iflags); 1517 write_lock_irqsave(&sg_index_lock, iflags);
1506 if (sdp->headfp) { 1518 sdp->detached = 1;
1507 sdp->detached = 1; 1519 list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
1508 for (sfp = sdp->headfp; sfp; sfp = tsfp) { 1520 wake_up_interruptible(&sfp->read_wait);
1509 tsfp = sfp->nextfp; 1521 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
1510 for (srp = sfp->headrp; srp; srp = tsrp) {
1511 tsrp = srp->nextrp;
1512 if (sfp->closed || (0 == sg_srp_done(srp, sfp)))
1513 sg_finish_rem_req(srp);
1514 }
1515 if (sfp->closed) {
1516 scsi_device_put(sdp->device);
1517 __sg_remove_sfp(sdp, sfp);
1518 } else {
1519 delay = 1;
1520 wake_up_interruptible(&sfp->read_wait);
1521 kill_fasync(&sfp->async_qp, SIGPOLL,
1522 POLL_HUP);
1523 }
1524 }
1525 SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d, dirty\n", sdp->index));
1526 if (NULL == sdp->headfp) {
1527 idr_remove(&sg_index_idr, sdp->index);
1528 }
1529 } else { /* nothing active, simple case */
1530 SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d\n", sdp->index));
1531 idr_remove(&sg_index_idr, sdp->index);
1532 } 1522 }
1533 write_unlock_irqrestore(&sg_index_lock, iflags); 1523 write_unlock_irqrestore(&sg_index_lock, iflags);
1534 1524
@@ -1536,13 +1526,8 @@ sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
1536 device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index)); 1526 device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
1537 cdev_del(sdp->cdev); 1527 cdev_del(sdp->cdev);
1538 sdp->cdev = NULL; 1528 sdp->cdev = NULL;
1539 put_disk(sdp->disk);
1540 sdp->disk = NULL;
1541 if (NULL == sdp->headfp)
1542 kfree(sdp);
1543 1529
1544 if (delay) 1530 sg_put_dev(sdp);
1545 msleep(10); /* dirty detach so delay device destruction */
1546} 1531}
1547 1532
1548module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR); 1533module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
@@ -1736,8 +1721,8 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1736 return -EFAULT; 1721 return -EFAULT;
1737 if (0 == blk_size) 1722 if (0 == blk_size)
1738 ++blk_size; /* don't know why */ 1723 ++blk_size; /* don't know why */
1739/* round request up to next highest SG_SECTOR_SZ byte boundary */ 1724 /* round request up to next highest SG_SECTOR_SZ byte boundary */
1740 blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK); 1725 blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
1741 SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n", 1726 SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
1742 buff_size, blk_size)); 1727 buff_size, blk_size));
1743 1728
@@ -1939,22 +1924,6 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
1939 return resp; 1924 return resp;
1940} 1925}
1941 1926
1942#ifdef CONFIG_SCSI_PROC_FS
1943static Sg_request *
1944sg_get_nth_request(Sg_fd * sfp, int nth)
1945{
1946 Sg_request *resp;
1947 unsigned long iflags;
1948 int k;
1949
1950 read_lock_irqsave(&sfp->rq_list_lock, iflags);
1951 for (k = 0, resp = sfp->headrp; resp && (k < nth);
1952 ++k, resp = resp->nextrp) ;
1953 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1954 return resp;
1955}
1956#endif
1957
1958/* always adds to end of list */ 1927/* always adds to end of list */
1959static Sg_request * 1928static Sg_request *
1960sg_add_request(Sg_fd * sfp) 1929sg_add_request(Sg_fd * sfp)
@@ -2030,22 +1999,6 @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp)
2030 return res; 1999 return res;
2031} 2000}
2032 2001
2033#ifdef CONFIG_SCSI_PROC_FS
2034static Sg_fd *
2035sg_get_nth_sfp(Sg_device * sdp, int nth)
2036{
2037 Sg_fd *resp;
2038 unsigned long iflags;
2039 int k;
2040
2041 read_lock_irqsave(&sg_index_lock, iflags);
2042 for (k = 0, resp = sdp->headfp; resp && (k < nth);
2043 ++k, resp = resp->nextfp) ;
2044 read_unlock_irqrestore(&sg_index_lock, iflags);
2045 return resp;
2046}
2047#endif
2048
2049static Sg_fd * 2002static Sg_fd *
2050sg_add_sfp(Sg_device * sdp, int dev) 2003sg_add_sfp(Sg_device * sdp, int dev)
2051{ 2004{
@@ -2060,6 +2013,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
2060 init_waitqueue_head(&sfp->read_wait); 2013 init_waitqueue_head(&sfp->read_wait);
2061 rwlock_init(&sfp->rq_list_lock); 2014 rwlock_init(&sfp->rq_list_lock);
2062 2015
2016 kref_init(&sfp->f_ref);
2063 sfp->timeout = SG_DEFAULT_TIMEOUT; 2017 sfp->timeout = SG_DEFAULT_TIMEOUT;
2064 sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; 2018 sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
2065 sfp->force_packid = SG_DEF_FORCE_PACK_ID; 2019 sfp->force_packid = SG_DEF_FORCE_PACK_ID;
@@ -2069,14 +2023,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
2069 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; 2023 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2070 sfp->parentdp = sdp; 2024 sfp->parentdp = sdp;
2071 write_lock_irqsave(&sg_index_lock, iflags); 2025 write_lock_irqsave(&sg_index_lock, iflags);
2072 if (!sdp->headfp) 2026 list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
2073 sdp->headfp = sfp;
2074 else { /* add to tail of existing list */
2075 Sg_fd *pfp = sdp->headfp;
2076 while (pfp->nextfp)
2077 pfp = pfp->nextfp;
2078 pfp->nextfp = sfp;
2079 }
2080 write_unlock_irqrestore(&sg_index_lock, iflags); 2027 write_unlock_irqrestore(&sg_index_lock, iflags);
2081 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp)); 2028 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
2082 if (unlikely(sg_big_buff != def_reserved_size)) 2029 if (unlikely(sg_big_buff != def_reserved_size))
@@ -2087,75 +2034,52 @@ sg_add_sfp(Sg_device * sdp, int dev)
2087 sg_build_reserve(sfp, bufflen); 2034 sg_build_reserve(sfp, bufflen);
2088 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n", 2035 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2089 sfp->reserve.bufflen, sfp->reserve.k_use_sg)); 2036 sfp->reserve.bufflen, sfp->reserve.k_use_sg));
2037
2038 kref_get(&sdp->d_ref);
2039 __module_get(THIS_MODULE);
2090 return sfp; 2040 return sfp;
2091} 2041}
2092 2042
2093static void 2043static void sg_remove_sfp_usercontext(struct work_struct *work)
2094__sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2095{ 2044{
2096 Sg_fd *fp; 2045 struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
2097 Sg_fd *prev_fp; 2046 struct sg_device *sdp = sfp->parentdp;
2047
2048 /* Cleanup any responses which were never read(). */
2049 while (sfp->headrp)
2050 sg_finish_rem_req(sfp->headrp);
2098 2051
2099 prev_fp = sdp->headfp;
2100 if (sfp == prev_fp)
2101 sdp->headfp = prev_fp->nextfp;
2102 else {
2103 while ((fp = prev_fp->nextfp)) {
2104 if (sfp == fp) {
2105 prev_fp->nextfp = fp->nextfp;
2106 break;
2107 }
2108 prev_fp = fp;
2109 }
2110 }
2111 if (sfp->reserve.bufflen > 0) { 2052 if (sfp->reserve.bufflen > 0) {
2112 SCSI_LOG_TIMEOUT(6, 2053 SCSI_LOG_TIMEOUT(6,
2113 printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n", 2054 printk("sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
2114 (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg)); 2055 (int) sfp->reserve.bufflen,
2056 (int) sfp->reserve.k_use_sg));
2115 sg_remove_scat(&sfp->reserve); 2057 sg_remove_scat(&sfp->reserve);
2116 } 2058 }
2117 sfp->parentdp = NULL; 2059
2118 SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp)); 2060 SCSI_LOG_TIMEOUT(6,
2061 printk("sg_remove_sfp: %s, sfp=0x%p\n",
2062 sdp->disk->disk_name,
2063 sfp));
2119 kfree(sfp); 2064 kfree(sfp);
2065
2066 scsi_device_put(sdp->device);
2067 sg_put_dev(sdp);
2068 module_put(THIS_MODULE);
2120} 2069}
2121 2070
2122/* Returns 0 in normal case, 1 when detached and sdp object removed */ 2071static void sg_remove_sfp(struct kref *kref)
2123static int
2124sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2125{ 2072{
2126 Sg_request *srp; 2073 struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
2127 Sg_request *tsrp; 2074 struct sg_device *sdp = sfp->parentdp;
2128 int dirty = 0; 2075 unsigned long iflags;
2129 int res = 0;
2130 2076
2131 for (srp = sfp->headrp; srp; srp = tsrp) { 2077 write_lock_irqsave(&sg_index_lock, iflags);
2132 tsrp = srp->nextrp; 2078 list_del(&sfp->sfd_siblings);
2133 if (sg_srp_done(srp, sfp)) 2079 write_unlock_irqrestore(&sg_index_lock, iflags);
2134 sg_finish_rem_req(srp); 2080 wake_up_interruptible(&sdp->o_excl_wait);
2135 else 2081
2136 ++dirty; 2082 execute_in_process_context(sg_remove_sfp_usercontext, &sfp->ew);
2137 }
2138 if (0 == dirty) {
2139 unsigned long iflags;
2140
2141 write_lock_irqsave(&sg_index_lock, iflags);
2142 __sg_remove_sfp(sdp, sfp);
2143 if (sdp->detached && (NULL == sdp->headfp)) {
2144 idr_remove(&sg_index_idr, sdp->index);
2145 kfree(sdp);
2146 res = 1;
2147 }
2148 write_unlock_irqrestore(&sg_index_lock, iflags);
2149 } else {
2150 /* MOD_INC's to inhibit unloading sg and associated adapter driver */
2151 /* only bump the access_count if we actually succeeded in
2152 * throwing another counter on the host module */
2153 scsi_device_get(sdp->device); /* XXX: retval ignored? */
2154 sfp->closed = 1; /* flag dirty state on this fd */
2155 SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n",
2156 dirty));
2157 }
2158 return res;
2159} 2083}
2160 2084
2161static int 2085static int
@@ -2197,19 +2121,38 @@ sg_last_dev(void)
2197} 2121}
2198#endif 2122#endif
2199 2123
2200static Sg_device * 2124/* must be called with sg_index_lock held */
2201sg_get_dev(int dev) 2125static Sg_device *sg_lookup_dev(int dev)
2202{ 2126{
2203 Sg_device *sdp; 2127 return idr_find(&sg_index_idr, dev);
2204 unsigned long iflags; 2128}
2205 2129
2206 read_lock_irqsave(&sg_index_lock, iflags); 2130static Sg_device *sg_get_dev(int dev)
2207 sdp = idr_find(&sg_index_idr, dev); 2131{
2208 read_unlock_irqrestore(&sg_index_lock, iflags); 2132 struct sg_device *sdp;
2133 unsigned long flags;
2134
2135 read_lock_irqsave(&sg_index_lock, flags);
2136 sdp = sg_lookup_dev(dev);
2137 if (!sdp)
2138 sdp = ERR_PTR(-ENXIO);
2139 else if (sdp->detached) {
2140 /* If sdp->detached, then the refcount may already be 0, in
2141 * which case it would be a bug to do kref_get().
2142 */
2143 sdp = ERR_PTR(-ENODEV);
2144 } else
2145 kref_get(&sdp->d_ref);
2146 read_unlock_irqrestore(&sg_index_lock, flags);
2209 2147
2210 return sdp; 2148 return sdp;
2211} 2149}
2212 2150
2151static void sg_put_dev(struct sg_device *sdp)
2152{
2153 kref_put(&sdp->d_ref, sg_device_destroy);
2154}
2155
2213#ifdef CONFIG_SCSI_PROC_FS 2156#ifdef CONFIG_SCSI_PROC_FS
2214 2157
2215static struct proc_dir_entry *sg_proc_sgp = NULL; 2158static struct proc_dir_entry *sg_proc_sgp = NULL;
@@ -2466,8 +2409,10 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
2466 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; 2409 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2467 Sg_device *sdp; 2410 Sg_device *sdp;
2468 struct scsi_device *scsidp; 2411 struct scsi_device *scsidp;
2412 unsigned long iflags;
2469 2413
2470 sdp = it ? sg_get_dev(it->index) : NULL; 2414 read_lock_irqsave(&sg_index_lock, iflags);
2415 sdp = it ? sg_lookup_dev(it->index) : NULL;
2471 if (sdp && (scsidp = sdp->device) && (!sdp->detached)) 2416 if (sdp && (scsidp = sdp->device) && (!sdp->detached))
2472 seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n", 2417 seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
2473 scsidp->host->host_no, scsidp->channel, 2418 scsidp->host->host_no, scsidp->channel,
@@ -2478,6 +2423,7 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
2478 (int) scsi_device_online(scsidp)); 2423 (int) scsi_device_online(scsidp));
2479 else 2424 else
2480 seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n"); 2425 seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
2426 read_unlock_irqrestore(&sg_index_lock, iflags);
2481 return 0; 2427 return 0;
2482} 2428}
2483 2429
@@ -2491,16 +2437,20 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2491 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; 2437 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2492 Sg_device *sdp; 2438 Sg_device *sdp;
2493 struct scsi_device *scsidp; 2439 struct scsi_device *scsidp;
2440 unsigned long iflags;
2494 2441
2495 sdp = it ? sg_get_dev(it->index) : NULL; 2442 read_lock_irqsave(&sg_index_lock, iflags);
2443 sdp = it ? sg_lookup_dev(it->index) : NULL;
2496 if (sdp && (scsidp = sdp->device) && (!sdp->detached)) 2444 if (sdp && (scsidp = sdp->device) && (!sdp->detached))
2497 seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n", 2445 seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
2498 scsidp->vendor, scsidp->model, scsidp->rev); 2446 scsidp->vendor, scsidp->model, scsidp->rev);
2499 else 2447 else
2500 seq_printf(s, "<no active device>\n"); 2448 seq_printf(s, "<no active device>\n");
2449 read_unlock_irqrestore(&sg_index_lock, iflags);
2501 return 0; 2450 return 0;
2502} 2451}
2503 2452
2453/* must be called while holding sg_index_lock */
2504static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) 2454static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2505{ 2455{
2506 int k, m, new_interface, blen, usg; 2456 int k, m, new_interface, blen, usg;
@@ -2510,9 +2460,12 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2510 const char * cp; 2460 const char * cp;
2511 unsigned int ms; 2461 unsigned int ms;
2512 2462
2513 for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) { 2463 k = 0;
2464 list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
2465 k++;
2466 read_lock(&fp->rq_list_lock); /* irqs already disabled */
2514 seq_printf(s, " FD(%d): timeout=%dms bufflen=%d " 2467 seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
2515 "(res)sgat=%d low_dma=%d\n", k + 1, 2468 "(res)sgat=%d low_dma=%d\n", k,
2516 jiffies_to_msecs(fp->timeout), 2469 jiffies_to_msecs(fp->timeout),
2517 fp->reserve.bufflen, 2470 fp->reserve.bufflen,
2518 (int) fp->reserve.k_use_sg, 2471 (int) fp->reserve.k_use_sg,
@@ -2520,7 +2473,9 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2520 seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n", 2473 seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
2521 (int) fp->cmd_q, (int) fp->force_packid, 2474 (int) fp->cmd_q, (int) fp->force_packid,
2522 (int) fp->keep_orphan, (int) fp->closed); 2475 (int) fp->keep_orphan, (int) fp->closed);
2523 for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) { 2476 for (m = 0, srp = fp->headrp;
2477 srp != NULL;
2478 ++m, srp = srp->nextrp) {
2524 hp = &srp->header; 2479 hp = &srp->header;
2525 new_interface = (hp->interface_id == '\0') ? 0 : 1; 2480 new_interface = (hp->interface_id == '\0') ? 0 : 1;
2526 if (srp->res_used) { 2481 if (srp->res_used) {
@@ -2557,6 +2512,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2557 } 2512 }
2558 if (0 == m) 2513 if (0 == m)
2559 seq_printf(s, " No requests active\n"); 2514 seq_printf(s, " No requests active\n");
2515 read_unlock(&fp->rq_list_lock);
2560 } 2516 }
2561} 2517}
2562 2518
@@ -2569,39 +2525,34 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
2569{ 2525{
2570 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; 2526 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2571 Sg_device *sdp; 2527 Sg_device *sdp;
2528 unsigned long iflags;
2572 2529
2573 if (it && (0 == it->index)) { 2530 if (it && (0 == it->index)) {
2574 seq_printf(s, "max_active_device=%d(origin 1)\n", 2531 seq_printf(s, "max_active_device=%d(origin 1)\n",
2575 (int)it->max); 2532 (int)it->max);
2576 seq_printf(s, " def_reserved_size=%d\n", sg_big_buff); 2533 seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
2577 } 2534 }
2578 sdp = it ? sg_get_dev(it->index) : NULL;
2579 if (sdp) {
2580 struct scsi_device *scsidp = sdp->device;
2581 2535
2582 if (NULL == scsidp) { 2536 read_lock_irqsave(&sg_index_lock, iflags);
2583 seq_printf(s, "device %d detached ??\n", 2537 sdp = it ? sg_lookup_dev(it->index) : NULL;
2584 (int)it->index); 2538 if (sdp && !list_empty(&sdp->sfds)) {
2585 return 0; 2539 struct scsi_device *scsidp = sdp->device;
2586 }
2587 2540
2588 if (sg_get_nth_sfp(sdp, 0)) { 2541 seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
2589 seq_printf(s, " >>> device=%s ", 2542 if (sdp->detached)
2590 sdp->disk->disk_name); 2543 seq_printf(s, "detached pending close ");
2591 if (sdp->detached) 2544 else
2592 seq_printf(s, "detached pending close "); 2545 seq_printf
2593 else 2546 (s, "scsi%d chan=%d id=%d lun=%d em=%d",
2594 seq_printf 2547 scsidp->host->host_no,
2595 (s, "scsi%d chan=%d id=%d lun=%d em=%d", 2548 scsidp->channel, scsidp->id,
2596 scsidp->host->host_no, 2549 scsidp->lun,
2597 scsidp->channel, scsidp->id, 2550 scsidp->host->hostt->emulated);
2598 scsidp->lun, 2551 seq_printf(s, " sg_tablesize=%d excl=%d\n",
2599 scsidp->host->hostt->emulated); 2552 sdp->sg_tablesize, sdp->exclude);
2600 seq_printf(s, " sg_tablesize=%d excl=%d\n",
2601 sdp->sg_tablesize, sdp->exclude);
2602 }
2603 sg_proc_debug_helper(s, sdp); 2553 sg_proc_debug_helper(s, sdp);
2604 } 2554 }
2555 read_unlock_irqrestore(&sg_index_lock, iflags);
2605 return 0; 2556 return 0;
2606} 2557}
2607 2558
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index c6f19ee8f2cb..eb24efea8f14 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -374,9 +374,9 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
374 if (!debugging) { /* Abnormal conditions for tape */ 374 if (!debugging) { /* Abnormal conditions for tape */
375 if (!cmdstatp->have_sense) 375 if (!cmdstatp->have_sense)
376 printk(KERN_WARNING 376 printk(KERN_WARNING
377 "%s: Error %x (sugg. bt 0x%x, driver bt 0x%x, host bt 0x%x).\n", 377 "%s: Error %x (driver bt 0x%x, host bt 0x%x).\n",
378 name, result, suggestion(result), 378 name, result, driver_byte(result),
379 driver_byte(result) & DRIVER_MASK, host_byte(result)); 379 host_byte(result));
380 else if (cmdstatp->have_sense && 380 else if (cmdstatp->have_sense &&
381 scode != NO_SENSE && 381 scode != NO_SENSE &&
382 scode != RECOVERED_ERROR && 382 scode != RECOVERED_ERROR &&
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index a3a18ad73125..47b614e8580c 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * SuperTrak EX Series Storage Controller driver for Linux 2 * SuperTrak EX Series Storage Controller driver for Linux
3 * 3 *
4 * Copyright (C) 2005, 2006 Promise Technology Inc. 4 * Copyright (C) 2005-2009 Promise Technology Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
@@ -36,8 +36,8 @@
36#include <scsi/scsi_eh.h> 36#include <scsi/scsi_eh.h>
37 37
38#define DRV_NAME "stex" 38#define DRV_NAME "stex"
39#define ST_DRIVER_VERSION "3.6.0000.1" 39#define ST_DRIVER_VERSION "4.6.0000.1"
40#define ST_VER_MAJOR 3 40#define ST_VER_MAJOR 4
41#define ST_VER_MINOR 6 41#define ST_VER_MINOR 6
42#define ST_OEM 0 42#define ST_OEM 0
43#define ST_BUILD_VER 1 43#define ST_BUILD_VER 1
@@ -103,7 +103,7 @@ enum {
103 MU_REQ_COUNT = (MU_MAX_REQUEST + 1), 103 MU_REQ_COUNT = (MU_MAX_REQUEST + 1),
104 MU_STATUS_COUNT = (MU_MAX_REQUEST + 1), 104 MU_STATUS_COUNT = (MU_MAX_REQUEST + 1),
105 105
106 STEX_CDB_LENGTH = MAX_COMMAND_SIZE, 106 STEX_CDB_LENGTH = 16,
107 REQ_VARIABLE_LEN = 1024, 107 REQ_VARIABLE_LEN = 1024,
108 STATUS_VAR_LEN = 128, 108 STATUS_VAR_LEN = 128,
109 ST_CAN_QUEUE = MU_MAX_REQUEST, 109 ST_CAN_QUEUE = MU_MAX_REQUEST,
@@ -114,15 +114,19 @@ enum {
114 SG_CF_EOT = 0x80, /* end of table */ 114 SG_CF_EOT = 0x80, /* end of table */
115 SG_CF_64B = 0x40, /* 64 bit item */ 115 SG_CF_64B = 0x40, /* 64 bit item */
116 SG_CF_HOST = 0x20, /* sg in host memory */ 116 SG_CF_HOST = 0x20, /* sg in host memory */
117 MSG_DATA_DIR_ND = 0,
118 MSG_DATA_DIR_IN = 1,
119 MSG_DATA_DIR_OUT = 2,
117 120
118 st_shasta = 0, 121 st_shasta = 0,
119 st_vsc = 1, 122 st_vsc = 1,
120 st_vsc1 = 2, 123 st_vsc1 = 2,
121 st_yosemite = 3, 124 st_yosemite = 3,
125 st_seq = 4,
122 126
123 PASSTHRU_REQ_TYPE = 0x00000001, 127 PASSTHRU_REQ_TYPE = 0x00000001,
124 PASSTHRU_REQ_NO_WAKEUP = 0x00000100, 128 PASSTHRU_REQ_NO_WAKEUP = 0x00000100,
125 ST_INTERNAL_TIMEOUT = 30, 129 ST_INTERNAL_TIMEOUT = 180,
126 130
127 ST_TO_CMD = 0, 131 ST_TO_CMD = 0,
128 ST_FROM_CMD = 1, 132 ST_FROM_CMD = 1,
@@ -152,35 +156,6 @@ enum {
152 ST_ADDITIONAL_MEM = 0x200000, 156 ST_ADDITIONAL_MEM = 0x200000,
153}; 157};
154 158
155/* SCSI inquiry data */
156typedef struct st_inq {
157 u8 DeviceType :5;
158 u8 DeviceTypeQualifier :3;
159 u8 DeviceTypeModifier :7;
160 u8 RemovableMedia :1;
161 u8 Versions;
162 u8 ResponseDataFormat :4;
163 u8 HiSupport :1;
164 u8 NormACA :1;
165 u8 ReservedBit :1;
166 u8 AERC :1;
167 u8 AdditionalLength;
168 u8 Reserved[2];
169 u8 SoftReset :1;
170 u8 CommandQueue :1;
171 u8 Reserved2 :1;
172 u8 LinkedCommands :1;
173 u8 Synchronous :1;
174 u8 Wide16Bit :1;
175 u8 Wide32Bit :1;
176 u8 RelativeAddressing :1;
177 u8 VendorId[8];
178 u8 ProductId[16];
179 u8 ProductRevisionLevel[4];
180 u8 VendorSpecific[20];
181 u8 Reserved3[40];
182} ST_INQ;
183
184struct st_sgitem { 159struct st_sgitem {
185 u8 ctrl; /* SG_CF_xxx */ 160 u8 ctrl; /* SG_CF_xxx */
186 u8 reserved[3]; 161 u8 reserved[3];
@@ -222,7 +197,7 @@ struct req_msg {
222 u8 target; 197 u8 target;
223 u8 task_attr; 198 u8 task_attr;
224 u8 task_manage; 199 u8 task_manage;
225 u8 prd_entry; 200 u8 data_dir;
226 u8 payload_sz; /* payload size in 4-byte, not used */ 201 u8 payload_sz; /* payload size in 4-byte, not used */
227 u8 cdb[STEX_CDB_LENGTH]; 202 u8 cdb[STEX_CDB_LENGTH];
228 u8 variable[REQ_VARIABLE_LEN]; 203 u8 variable[REQ_VARIABLE_LEN];
@@ -284,7 +259,7 @@ struct st_drvver {
284#define MU_REQ_BUFFER_SIZE (MU_REQ_COUNT * sizeof(struct req_msg)) 259#define MU_REQ_BUFFER_SIZE (MU_REQ_COUNT * sizeof(struct req_msg))
285#define MU_STATUS_BUFFER_SIZE (MU_STATUS_COUNT * sizeof(struct status_msg)) 260#define MU_STATUS_BUFFER_SIZE (MU_STATUS_COUNT * sizeof(struct status_msg))
286#define MU_BUFFER_SIZE (MU_REQ_BUFFER_SIZE + MU_STATUS_BUFFER_SIZE) 261#define MU_BUFFER_SIZE (MU_REQ_BUFFER_SIZE + MU_STATUS_BUFFER_SIZE)
287#define STEX_EXTRA_SIZE max(sizeof(struct st_frame), sizeof(ST_INQ)) 262#define STEX_EXTRA_SIZE sizeof(struct st_frame)
288#define STEX_BUFFER_SIZE (MU_BUFFER_SIZE + STEX_EXTRA_SIZE) 263#define STEX_BUFFER_SIZE (MU_BUFFER_SIZE + STEX_EXTRA_SIZE)
289 264
290struct st_ccb { 265struct st_ccb {
@@ -346,8 +321,8 @@ MODULE_VERSION(ST_DRIVER_VERSION);
346static void stex_gettime(__le32 *time) 321static void stex_gettime(__le32 *time)
347{ 322{
348 struct timeval tv; 323 struct timeval tv;
349 do_gettimeofday(&tv);
350 324
325 do_gettimeofday(&tv);
351 *time = cpu_to_le32(tv.tv_sec & 0xffffffff); 326 *time = cpu_to_le32(tv.tv_sec & 0xffffffff);
352 *(time + 1) = cpu_to_le32((tv.tv_sec >> 16) >> 16); 327 *(time + 1) = cpu_to_le32((tv.tv_sec >> 16) >> 16);
353} 328}
@@ -368,7 +343,7 @@ static void stex_invalid_field(struct scsi_cmnd *cmd,
368{ 343{
369 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 344 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
370 345
371 /* "Invalid field in cbd" */ 346 /* "Invalid field in cdb" */
372 scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24, 347 scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
373 0x0); 348 0x0);
374 done(cmd); 349 done(cmd);
@@ -497,6 +472,7 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
497 unsigned int id,lun; 472 unsigned int id,lun;
498 struct req_msg *req; 473 struct req_msg *req;
499 u16 tag; 474 u16 tag;
475
500 host = cmd->device->host; 476 host = cmd->device->host;
501 id = cmd->device->id; 477 id = cmd->device->id;
502 lun = cmd->device->lun; 478 lun = cmd->device->lun;
@@ -508,6 +484,7 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
508 static char ms10_caching_page[12] = 484 static char ms10_caching_page[12] =
509 { 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 }; 485 { 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 };
510 unsigned char page; 486 unsigned char page;
487
511 page = cmd->cmnd[2] & 0x3f; 488 page = cmd->cmnd[2] & 0x3f;
512 if (page == 0x8 || page == 0x3f) { 489 if (page == 0x8 || page == 0x3f) {
513 scsi_sg_copy_from_buffer(cmd, ms10_caching_page, 490 scsi_sg_copy_from_buffer(cmd, ms10_caching_page,
@@ -551,6 +528,7 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
551 if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { 528 if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
552 struct st_drvver ver; 529 struct st_drvver ver;
553 size_t cp_len = sizeof(ver); 530 size_t cp_len = sizeof(ver);
531
554 ver.major = ST_VER_MAJOR; 532 ver.major = ST_VER_MAJOR;
555 ver.minor = ST_VER_MINOR; 533 ver.minor = ST_VER_MINOR;
556 ver.oem = ST_OEM; 534 ver.oem = ST_OEM;
@@ -584,6 +562,13 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
584 /* cdb */ 562 /* cdb */
585 memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH); 563 memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);
586 564
565 if (cmd->sc_data_direction == DMA_FROM_DEVICE)
566 req->data_dir = MSG_DATA_DIR_IN;
567 else if (cmd->sc_data_direction == DMA_TO_DEVICE)
568 req->data_dir = MSG_DATA_DIR_OUT;
569 else
570 req->data_dir = MSG_DATA_DIR_ND;
571
587 hba->ccb[tag].cmd = cmd; 572 hba->ccb[tag].cmd = cmd;
588 hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE; 573 hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
589 hba->ccb[tag].sense_buffer = cmd->sense_buffer; 574 hba->ccb[tag].sense_buffer = cmd->sense_buffer;
@@ -642,6 +627,7 @@ static void stex_copy_data(struct st_ccb *ccb,
642 struct status_msg *resp, unsigned int variable) 627 struct status_msg *resp, unsigned int variable)
643{ 628{
644 size_t count = variable; 629 size_t count = variable;
630
645 if (resp->scsi_status != SAM_STAT_GOOD) { 631 if (resp->scsi_status != SAM_STAT_GOOD) {
646 if (ccb->sense_buffer != NULL) 632 if (ccb->sense_buffer != NULL)
647 memcpy(ccb->sense_buffer, resp->variable, 633 memcpy(ccb->sense_buffer, resp->variable,
@@ -661,24 +647,6 @@ static void stex_ys_commands(struct st_hba *hba,
661 resp->scsi_status != SAM_STAT_CHECK_CONDITION) { 647 resp->scsi_status != SAM_STAT_CHECK_CONDITION) {
662 scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) - 648 scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
663 le32_to_cpu(*(__le32 *)&resp->variable[0])); 649 le32_to_cpu(*(__le32 *)&resp->variable[0]));
664 return;
665 }
666
667 if (resp->srb_status != 0)
668 return;
669
670 /* determine inquiry command status by DeviceTypeQualifier */
671 if (ccb->cmd->cmnd[0] == INQUIRY &&
672 resp->scsi_status == SAM_STAT_GOOD) {
673 ST_INQ *inq_data;
674
675 scsi_sg_copy_to_buffer(ccb->cmd, hba->copy_buffer,
676 STEX_EXTRA_SIZE);
677 inq_data = (ST_INQ *)hba->copy_buffer;
678 if (inq_data->DeviceTypeQualifier != 0)
679 ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT;
680 else
681 ccb->srb_status = SRB_STATUS_SUCCESS;
682 } 650 }
683} 651}
684 652
@@ -746,6 +714,7 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
746 stex_copy_data(ccb, resp, size); 714 stex_copy_data(ccb, resp, size);
747 } 715 }
748 716
717 ccb->req = NULL;
749 ccb->srb_status = resp->srb_status; 718 ccb->srb_status = resp->srb_status;
750 ccb->scsi_status = resp->scsi_status; 719 ccb->scsi_status = resp->scsi_status;
751 720
@@ -983,6 +952,7 @@ static int stex_reset(struct scsi_cmnd *cmd)
983 struct st_hba *hba; 952 struct st_hba *hba;
984 unsigned long flags; 953 unsigned long flags;
985 unsigned long before; 954 unsigned long before;
955
986 hba = (struct st_hba *) &cmd->device->host->hostdata[0]; 956 hba = (struct st_hba *) &cmd->device->host->hostdata[0];
987 957
988 printk(KERN_INFO DRV_NAME 958 printk(KERN_INFO DRV_NAME
@@ -1067,6 +1037,7 @@ static struct scsi_host_template driver_template = {
1067static int stex_set_dma_mask(struct pci_dev * pdev) 1037static int stex_set_dma_mask(struct pci_dev * pdev)
1068{ 1038{
1069 int ret; 1039 int ret;
1040
1070 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) 1041 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)
1071 && !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) 1042 && !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
1072 return 0; 1043 return 0;
@@ -1124,9 +1095,9 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1124 } 1095 }
1125 1096
1126 hba->cardtype = (unsigned int) id->driver_data; 1097 hba->cardtype = (unsigned int) id->driver_data;
1127 if (hba->cardtype == st_vsc && (pdev->subsystem_device & 0xf) == 0x1) 1098 if (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))
1128 hba->cardtype = st_vsc1; 1099 hba->cardtype = st_vsc1;
1129 hba->dma_size = (hba->cardtype == st_vsc1) ? 1100 hba->dma_size = (hba->cardtype == st_vsc1 || hba->cardtype == st_seq) ?
1130 (STEX_BUFFER_SIZE + ST_ADDITIONAL_MEM) : (STEX_BUFFER_SIZE); 1101 (STEX_BUFFER_SIZE + ST_ADDITIONAL_MEM) : (STEX_BUFFER_SIZE);
1131 hba->dma_mem = dma_alloc_coherent(&pdev->dev, 1102 hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1132 hba->dma_size, &hba->dma_handle, GFP_KERNEL); 1103 hba->dma_size, &hba->dma_handle, GFP_KERNEL);
@@ -1146,10 +1117,10 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1146 host->max_lun = 8; 1117 host->max_lun = 8;
1147 host->max_id = 16 + 1; 1118 host->max_id = 16 + 1;
1148 } else if (hba->cardtype == st_yosemite) { 1119 } else if (hba->cardtype == st_yosemite) {
1149 host->max_lun = 128; 1120 host->max_lun = 256;
1150 host->max_id = 1 + 1; 1121 host->max_id = 1 + 1;
1151 } else { 1122 } else {
1152 /* st_vsc and st_vsc1 */ 1123 /* st_vsc , st_vsc1 and st_seq */
1153 host->max_lun = 1; 1124 host->max_lun = 1;
1154 host->max_id = 128 + 1; 1125 host->max_id = 128 + 1;
1155 } 1126 }
@@ -1299,18 +1270,10 @@ static struct pci_device_id stex_pci_tbl[] = {
1299 { 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc }, 1270 { 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
1300 1271
1301 /* st_yosemite */ 1272 /* st_yosemite */
1302 { 0x105a, 0x8650, PCI_ANY_ID, 0x4600, 0, 0, 1273 { 0x105a, 0x8650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yosemite },
1303 st_yosemite }, /* SuperTrak EX4650 */ 1274
1304 { 0x105a, 0x8650, PCI_ANY_ID, 0x4610, 0, 0, 1275 /* st_seq */
1305 st_yosemite }, /* SuperTrak EX4650o */ 1276 { 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq },
1306 { 0x105a, 0x8650, PCI_ANY_ID, 0x8600, 0, 0,
1307 st_yosemite }, /* SuperTrak EX8650EL */
1308 { 0x105a, 0x8650, PCI_ANY_ID, 0x8601, 0, 0,
1309 st_yosemite }, /* SuperTrak EX8650 */
1310 { 0x105a, 0x8650, PCI_ANY_ID, 0x8602, 0, 0,
1311 st_yosemite }, /* SuperTrak EX8654 */
1312 { 0x105a, 0x8650, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1313 st_yosemite }, /* generic st_yosemite */
1314 { } /* terminate list */ 1277 { } /* terminate list */
1315}; 1278};
1316MODULE_DEVICE_TABLE(pci, stex_pci_tbl); 1279MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index f4e6cde1fd0d..23e782015880 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -792,9 +792,9 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev)
792 792
793 /* 793 /*
794 * Select queue depth from driver setup. 794 * Select queue depth from driver setup.
795 * Donnot use more than configured by user. 795 * Do not use more than configured by user.
796 * Use at least 2. 796 * Use at least 1.
797 * Donnot use more than our maximum. 797 * Do not use more than our maximum.
798 */ 798 */
799 reqtags = sym_driver_setup.max_tag; 799 reqtags = sym_driver_setup.max_tag;
800 if (reqtags > tp->usrtags) 800 if (reqtags > tp->usrtags)
@@ -803,7 +803,7 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev)
803 reqtags = 0; 803 reqtags = 0;
804 if (reqtags > SYM_CONF_MAX_TAG) 804 if (reqtags > SYM_CONF_MAX_TAG)
805 reqtags = SYM_CONF_MAX_TAG; 805 reqtags = SYM_CONF_MAX_TAG;
806 depth_to_use = reqtags ? reqtags : 2; 806 depth_to_use = reqtags ? reqtags : 1;
807 scsi_adjust_queue_depth(sdev, 807 scsi_adjust_queue_depth(sdev,
808 sdev->tagged_supported ? MSG_SIMPLE_TAG : 0, 808 sdev->tagged_supported ? MSG_SIMPLE_TAG : 0,
809 depth_to_use); 809 depth_to_use);
@@ -1236,14 +1236,29 @@ static int sym53c8xx_proc_info(struct Scsi_Host *shost, char *buffer,
1236#endif /* SYM_LINUX_PROC_INFO_SUPPORT */ 1236#endif /* SYM_LINUX_PROC_INFO_SUPPORT */
1237 1237
1238/* 1238/*
1239 * Free resources claimed by sym_iomap_device(). Note that
1240 * sym_free_resources() should be used instead of this function after calling
1241 * sym_attach().
1242 */
1243static void __devinit
1244sym_iounmap_device(struct sym_device *device)
1245{
1246 if (device->s.ioaddr)
1247 pci_iounmap(device->pdev, device->s.ioaddr);
1248 if (device->s.ramaddr)
1249 pci_iounmap(device->pdev, device->s.ramaddr);
1250}
1251
1252/*
1239 * Free controller resources. 1253 * Free controller resources.
1240 */ 1254 */
1241static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev) 1255static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev,
1256 int do_free_irq)
1242{ 1257{
1243 /* 1258 /*
1244 * Free O/S specific resources. 1259 * Free O/S specific resources.
1245 */ 1260 */
1246 if (pdev->irq) 1261 if (do_free_irq)
1247 free_irq(pdev->irq, np->s.host); 1262 free_irq(pdev->irq, np->s.host);
1248 if (np->s.ioaddr) 1263 if (np->s.ioaddr)
1249 pci_iounmap(pdev, np->s.ioaddr); 1264 pci_iounmap(pdev, np->s.ioaddr);
@@ -1271,10 +1286,11 @@ static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt,
1271{ 1286{
1272 struct sym_data *sym_data; 1287 struct sym_data *sym_data;
1273 struct sym_hcb *np = NULL; 1288 struct sym_hcb *np = NULL;
1274 struct Scsi_Host *shost; 1289 struct Scsi_Host *shost = NULL;
1275 struct pci_dev *pdev = dev->pdev; 1290 struct pci_dev *pdev = dev->pdev;
1276 unsigned long flags; 1291 unsigned long flags;
1277 struct sym_fw *fw; 1292 struct sym_fw *fw;
1293 int do_free_irq = 0;
1278 1294
1279 printk(KERN_INFO "sym%d: <%s> rev 0x%x at pci %s irq %u\n", 1295 printk(KERN_INFO "sym%d: <%s> rev 0x%x at pci %s irq %u\n",
1280 unit, dev->chip.name, pdev->revision, pci_name(pdev), 1296 unit, dev->chip.name, pdev->revision, pci_name(pdev),
@@ -1285,11 +1301,11 @@ static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt,
1285 */ 1301 */
1286 fw = sym_find_firmware(&dev->chip); 1302 fw = sym_find_firmware(&dev->chip);
1287 if (!fw) 1303 if (!fw)
1288 return NULL; 1304 goto attach_failed;
1289 1305
1290 shost = scsi_host_alloc(tpnt, sizeof(*sym_data)); 1306 shost = scsi_host_alloc(tpnt, sizeof(*sym_data));
1291 if (!shost) 1307 if (!shost)
1292 return NULL; 1308 goto attach_failed;
1293 sym_data = shost_priv(shost); 1309 sym_data = shost_priv(shost);
1294 1310
1295 /* 1311 /*
@@ -1319,6 +1335,10 @@ static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt,
1319 np->maxoffs = dev->chip.offset_max; 1335 np->maxoffs = dev->chip.offset_max;
1320 np->maxburst = dev->chip.burst_max; 1336 np->maxburst = dev->chip.burst_max;
1321 np->myaddr = dev->host_id; 1337 np->myaddr = dev->host_id;
1338 np->mmio_ba = (u32)dev->mmio_base;
1339 np->ram_ba = (u32)dev->ram_base;
1340 np->s.ioaddr = dev->s.ioaddr;
1341 np->s.ramaddr = dev->s.ramaddr;
1322 1342
1323 /* 1343 /*
1324 * Edit its name. 1344 * Edit its name.
@@ -1334,22 +1354,6 @@ static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt,
1334 goto attach_failed; 1354 goto attach_failed;
1335 } 1355 }
1336 1356
1337 /*
1338 * Try to map the controller chip to
1339 * virtual and physical memory.
1340 */
1341 np->mmio_ba = (u32)dev->mmio_base;
1342 np->s.ioaddr = dev->s.ioaddr;
1343 np->s.ramaddr = dev->s.ramaddr;
1344
1345 /*
1346 * Map on-chip RAM if present and supported.
1347 */
1348 if (!(np->features & FE_RAM))
1349 dev->ram_base = 0;
1350 if (dev->ram_base)
1351 np->ram_ba = (u32)dev->ram_base;
1352
1353 if (sym_hcb_attach(shost, fw, dev->nvram)) 1357 if (sym_hcb_attach(shost, fw, dev->nvram))
1354 goto attach_failed; 1358 goto attach_failed;
1355 1359
@@ -1364,6 +1368,7 @@ static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt,
1364 sym_name(np), pdev->irq); 1368 sym_name(np), pdev->irq);
1365 goto attach_failed; 1369 goto attach_failed;
1366 } 1370 }
1371 do_free_irq = 1;
1367 1372
1368 /* 1373 /*
1369 * After SCSI devices have been opened, we cannot 1374 * After SCSI devices have been opened, we cannot
@@ -1416,12 +1421,13 @@ static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt,
1416 "TERMINATION, DEVICE POWER etc.!\n", sym_name(np)); 1421 "TERMINATION, DEVICE POWER etc.!\n", sym_name(np));
1417 spin_unlock_irqrestore(shost->host_lock, flags); 1422 spin_unlock_irqrestore(shost->host_lock, flags);
1418 attach_failed: 1423 attach_failed:
1419 if (!shost) 1424 printf_info("sym%d: giving up ...\n", unit);
1420 return NULL;
1421 printf_info("%s: giving up ...\n", sym_name(np));
1422 if (np) 1425 if (np)
1423 sym_free_resources(np, pdev); 1426 sym_free_resources(np, pdev, do_free_irq);
1424 scsi_host_put(shost); 1427 else
1428 sym_iounmap_device(dev);
1429 if (shost)
1430 scsi_host_put(shost);
1425 1431
1426 return NULL; 1432 return NULL;
1427 } 1433 }
@@ -1550,30 +1556,28 @@ static int __devinit sym_set_workarounds(struct sym_device *device)
1550} 1556}
1551 1557
1552/* 1558/*
1553 * Read and check the PCI configuration for any detected NCR 1559 * Map HBA registers and on-chip SRAM (if present).
1554 * boards and save data for attaching after all boards have
1555 * been detected.
1556 */ 1560 */
1557static void __devinit 1561static int __devinit
1558sym_init_device(struct pci_dev *pdev, struct sym_device *device) 1562sym_iomap_device(struct sym_device *device)
1559{ 1563{
1560 int i = 2; 1564 struct pci_dev *pdev = device->pdev;
1561 struct pci_bus_region bus_addr; 1565 struct pci_bus_region bus_addr;
1562 1566 int i = 2;
1563 device->host_id = SYM_SETUP_HOST_ID;
1564 device->pdev = pdev;
1565 1567
1566 pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[1]); 1568 pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[1]);
1567 device->mmio_base = bus_addr.start; 1569 device->mmio_base = bus_addr.start;
1568 1570
1569 /* 1571 if (device->chip.features & FE_RAM) {
1570 * If the BAR is 64-bit, resource 2 will be occupied by the 1572 /*
1571 * upper 32 bits 1573 * If the BAR is 64-bit, resource 2 will be occupied by the
1572 */ 1574 * upper 32 bits
1573 if (!pdev->resource[i].flags) 1575 */
1574 i++; 1576 if (!pdev->resource[i].flags)
1575 pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[i]); 1577 i++;
1576 device->ram_base = bus_addr.start; 1578 pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[i]);
1579 device->ram_base = bus_addr.start;
1580 }
1577 1581
1578#ifdef CONFIG_SCSI_SYM53C8XX_MMIO 1582#ifdef CONFIG_SCSI_SYM53C8XX_MMIO
1579 if (device->mmio_base) 1583 if (device->mmio_base)
@@ -1583,9 +1587,21 @@ sym_init_device(struct pci_dev *pdev, struct sym_device *device)
1583 if (!device->s.ioaddr) 1587 if (!device->s.ioaddr)
1584 device->s.ioaddr = pci_iomap(pdev, 0, 1588 device->s.ioaddr = pci_iomap(pdev, 0,
1585 pci_resource_len(pdev, 0)); 1589 pci_resource_len(pdev, 0));
1586 if (device->ram_base) 1590 if (!device->s.ioaddr) {
1591 dev_err(&pdev->dev, "could not map registers; giving up.\n");
1592 return -EIO;
1593 }
1594 if (device->ram_base) {
1587 device->s.ramaddr = pci_iomap(pdev, i, 1595 device->s.ramaddr = pci_iomap(pdev, i,
1588 pci_resource_len(pdev, i)); 1596 pci_resource_len(pdev, i));
1597 if (!device->s.ramaddr) {
1598 dev_warn(&pdev->dev,
1599 "could not map SRAM; continuing anyway.\n");
1600 device->ram_base = 0;
1601 }
1602 }
1603
1604 return 0;
1589} 1605}
1590 1606
1591/* 1607/*
@@ -1659,7 +1675,8 @@ static int sym_detach(struct Scsi_Host *shost, struct pci_dev *pdev)
1659 udelay(10); 1675 udelay(10);
1660 OUTB(np, nc_istat, 0); 1676 OUTB(np, nc_istat, 0);
1661 1677
1662 sym_free_resources(np, pdev); 1678 sym_free_resources(np, pdev, 1);
1679 scsi_host_put(shost);
1663 1680
1664 return 1; 1681 return 1;
1665} 1682}
@@ -1696,9 +1713,13 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
1696 struct sym_device sym_dev; 1713 struct sym_device sym_dev;
1697 struct sym_nvram nvram; 1714 struct sym_nvram nvram;
1698 struct Scsi_Host *shost; 1715 struct Scsi_Host *shost;
1716 int do_iounmap = 0;
1717 int do_disable_device = 1;
1699 1718
1700 memset(&sym_dev, 0, sizeof(sym_dev)); 1719 memset(&sym_dev, 0, sizeof(sym_dev));
1701 memset(&nvram, 0, sizeof(nvram)); 1720 memset(&nvram, 0, sizeof(nvram));
1721 sym_dev.pdev = pdev;
1722 sym_dev.host_id = SYM_SETUP_HOST_ID;
1702 1723
1703 if (pci_enable_device(pdev)) 1724 if (pci_enable_device(pdev))
1704 goto leave; 1725 goto leave;
@@ -1708,12 +1729,17 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
1708 if (pci_request_regions(pdev, NAME53C8XX)) 1729 if (pci_request_regions(pdev, NAME53C8XX))
1709 goto disable; 1730 goto disable;
1710 1731
1711 sym_init_device(pdev, &sym_dev);
1712 if (sym_check_supported(&sym_dev)) 1732 if (sym_check_supported(&sym_dev))
1713 goto free; 1733 goto free;
1714 1734
1715 if (sym_check_raid(&sym_dev)) 1735 if (sym_iomap_device(&sym_dev))
1716 goto leave; /* Don't disable the device */ 1736 goto free;
1737 do_iounmap = 1;
1738
1739 if (sym_check_raid(&sym_dev)) {
1740 do_disable_device = 0; /* Don't disable the device */
1741 goto free;
1742 }
1717 1743
1718 if (sym_set_workarounds(&sym_dev)) 1744 if (sym_set_workarounds(&sym_dev))
1719 goto free; 1745 goto free;
@@ -1722,6 +1748,7 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
1722 1748
1723 sym_get_nvram(&sym_dev, &nvram); 1749 sym_get_nvram(&sym_dev, &nvram);
1724 1750
1751 do_iounmap = 0; /* Don't sym_iounmap_device() after sym_attach(). */
1725 shost = sym_attach(&sym2_template, attach_count, &sym_dev); 1752 shost = sym_attach(&sym2_template, attach_count, &sym_dev);
1726 if (!shost) 1753 if (!shost)
1727 goto free; 1754 goto free;
@@ -1737,9 +1764,12 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
1737 detach: 1764 detach:
1738 sym_detach(pci_get_drvdata(pdev), pdev); 1765 sym_detach(pci_get_drvdata(pdev), pdev);
1739 free: 1766 free:
1767 if (do_iounmap)
1768 sym_iounmap_device(&sym_dev);
1740 pci_release_regions(pdev); 1769 pci_release_regions(pdev);
1741 disable: 1770 disable:
1742 pci_disable_device(pdev); 1771 if (do_disable_device)
1772 pci_disable_device(pdev);
1743 leave: 1773 leave:
1744 return -ENODEV; 1774 return -ENODEV;
1745} 1775}
@@ -1749,7 +1779,6 @@ static void sym2_remove(struct pci_dev *pdev)
1749 struct Scsi_Host *shost = pci_get_drvdata(pdev); 1779 struct Scsi_Host *shost = pci_get_drvdata(pdev);
1750 1780
1751 scsi_remove_host(shost); 1781 scsi_remove_host(shost);
1752 scsi_host_put(shost);
1753 sym_detach(shost, pdev); 1782 sym_detach(shost, pdev);
1754 pci_release_regions(pdev); 1783 pci_release_regions(pdev);
1755 pci_disable_device(pdev); 1784 pci_disable_device(pdev);
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 98df1651404f..ccea7db59f49 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -1433,13 +1433,12 @@ static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgp
1433 * Many devices implement PPR in a buggy way, so only use it if we 1433 * Many devices implement PPR in a buggy way, so only use it if we
1434 * really want to. 1434 * really want to.
1435 */ 1435 */
1436 if (goal->offset && 1436 if (goal->renego == NS_PPR || (goal->offset &&
1437 (goal->iu || goal->dt || goal->qas || (goal->period < 0xa))) { 1437 (goal->iu || goal->dt || goal->qas || (goal->period < 0xa)))) {
1438 nego = NS_PPR; 1438 nego = NS_PPR;
1439 } else if (spi_width(starget) != goal->width) { 1439 } else if (goal->renego == NS_WIDE || goal->width) {
1440 nego = NS_WIDE; 1440 nego = NS_WIDE;
1441 } else if (spi_period(starget) != goal->period || 1441 } else if (goal->renego == NS_SYNC || goal->offset) {
1442 spi_offset(starget) != goal->offset) {
1443 nego = NS_SYNC; 1442 nego = NS_SYNC;
1444 } else { 1443 } else {
1445 goal->check_nego = 0; 1444 goal->check_nego = 0;
@@ -2040,6 +2039,29 @@ static void sym_settrans(struct sym_hcb *np, int target, u_char opts, u_char ofs
2040 } 2039 }
2041} 2040}
2042 2041
2042static void sym_announce_transfer_rate(struct sym_tcb *tp)
2043{
2044 struct scsi_target *starget = tp->starget;
2045
2046 if (tp->tprint.period != spi_period(starget) ||
2047 tp->tprint.offset != spi_offset(starget) ||
2048 tp->tprint.width != spi_width(starget) ||
2049 tp->tprint.iu != spi_iu(starget) ||
2050 tp->tprint.dt != spi_dt(starget) ||
2051 tp->tprint.qas != spi_qas(starget) ||
2052 !tp->tprint.check_nego) {
2053 tp->tprint.period = spi_period(starget);
2054 tp->tprint.offset = spi_offset(starget);
2055 tp->tprint.width = spi_width(starget);
2056 tp->tprint.iu = spi_iu(starget);
2057 tp->tprint.dt = spi_dt(starget);
2058 tp->tprint.qas = spi_qas(starget);
2059 tp->tprint.check_nego = 1;
2060
2061 spi_display_xfer_agreement(starget);
2062 }
2063}
2064
2043/* 2065/*
2044 * We received a WDTR. 2066 * We received a WDTR.
2045 * Let everything be aware of the changes. 2067 * Let everything be aware of the changes.
@@ -2049,11 +2071,13 @@ static void sym_setwide(struct sym_hcb *np, int target, u_char wide)
2049 struct sym_tcb *tp = &np->target[target]; 2071 struct sym_tcb *tp = &np->target[target];
2050 struct scsi_target *starget = tp->starget; 2072 struct scsi_target *starget = tp->starget;
2051 2073
2052 if (spi_width(starget) == wide)
2053 return;
2054
2055 sym_settrans(np, target, 0, 0, 0, wide, 0, 0); 2074 sym_settrans(np, target, 0, 0, 0, wide, 0, 0);
2056 2075
2076 if (wide)
2077 tp->tgoal.renego = NS_WIDE;
2078 else
2079 tp->tgoal.renego = 0;
2080 tp->tgoal.check_nego = 0;
2057 tp->tgoal.width = wide; 2081 tp->tgoal.width = wide;
2058 spi_offset(starget) = 0; 2082 spi_offset(starget) = 0;
2059 spi_period(starget) = 0; 2083 spi_period(starget) = 0;
@@ -2063,7 +2087,7 @@ static void sym_setwide(struct sym_hcb *np, int target, u_char wide)
2063 spi_qas(starget) = 0; 2087 spi_qas(starget) = 0;
2064 2088
2065 if (sym_verbose >= 3) 2089 if (sym_verbose >= 3)
2066 spi_display_xfer_agreement(starget); 2090 sym_announce_transfer_rate(tp);
2067} 2091}
2068 2092
2069/* 2093/*
@@ -2080,6 +2104,12 @@ sym_setsync(struct sym_hcb *np, int target,
2080 2104
2081 sym_settrans(np, target, 0, ofs, per, wide, div, fak); 2105 sym_settrans(np, target, 0, ofs, per, wide, div, fak);
2082 2106
2107 if (wide)
2108 tp->tgoal.renego = NS_WIDE;
2109 else if (ofs)
2110 tp->tgoal.renego = NS_SYNC;
2111 else
2112 tp->tgoal.renego = 0;
2083 spi_period(starget) = per; 2113 spi_period(starget) = per;
2084 spi_offset(starget) = ofs; 2114 spi_offset(starget) = ofs;
2085 spi_iu(starget) = spi_dt(starget) = spi_qas(starget) = 0; 2115 spi_iu(starget) = spi_dt(starget) = spi_qas(starget) = 0;
@@ -2090,7 +2120,7 @@ sym_setsync(struct sym_hcb *np, int target,
2090 tp->tgoal.check_nego = 0; 2120 tp->tgoal.check_nego = 0;
2091 } 2121 }
2092 2122
2093 spi_display_xfer_agreement(starget); 2123 sym_announce_transfer_rate(tp);
2094} 2124}
2095 2125
2096/* 2126/*
@@ -2106,6 +2136,10 @@ sym_setpprot(struct sym_hcb *np, int target, u_char opts, u_char ofs,
2106 2136
2107 sym_settrans(np, target, opts, ofs, per, wide, div, fak); 2137 sym_settrans(np, target, opts, ofs, per, wide, div, fak);
2108 2138
2139 if (wide || ofs)
2140 tp->tgoal.renego = NS_PPR;
2141 else
2142 tp->tgoal.renego = 0;
2109 spi_width(starget) = tp->tgoal.width = wide; 2143 spi_width(starget) = tp->tgoal.width = wide;
2110 spi_period(starget) = tp->tgoal.period = per; 2144 spi_period(starget) = tp->tgoal.period = per;
2111 spi_offset(starget) = tp->tgoal.offset = ofs; 2145 spi_offset(starget) = tp->tgoal.offset = ofs;
@@ -2114,7 +2148,7 @@ sym_setpprot(struct sym_hcb *np, int target, u_char opts, u_char ofs,
2114 spi_qas(starget) = tp->tgoal.qas = !!(opts & PPR_OPT_QAS); 2148 spi_qas(starget) = tp->tgoal.qas = !!(opts & PPR_OPT_QAS);
2115 tp->tgoal.check_nego = 0; 2149 tp->tgoal.check_nego = 0;
2116 2150
2117 spi_display_xfer_agreement(starget); 2151 sym_announce_transfer_rate(tp);
2118} 2152}
2119 2153
2120/* 2154/*
@@ -3516,6 +3550,7 @@ static void sym_sir_task_recovery(struct sym_hcb *np, int num)
3516 spi_dt(starget) = 0; 3550 spi_dt(starget) = 0;
3517 spi_qas(starget) = 0; 3551 spi_qas(starget) = 0;
3518 tp->tgoal.check_nego = 1; 3552 tp->tgoal.check_nego = 1;
3553 tp->tgoal.renego = 0;
3519 } 3554 }
3520 3555
3521 /* 3556 /*
@@ -5135,9 +5170,14 @@ int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *
5135 /* 5170 /*
5136 * Build a negotiation message if needed. 5171 * Build a negotiation message if needed.
5137 * (nego_status is filled by sym_prepare_nego()) 5172 * (nego_status is filled by sym_prepare_nego())
5173 *
5174 * Always negotiate on INQUIRY and REQUEST SENSE.
5175 *
5138 */ 5176 */
5139 cp->nego_status = 0; 5177 cp->nego_status = 0;
5140 if (tp->tgoal.check_nego && !tp->nego_cp && lp) { 5178 if ((tp->tgoal.check_nego ||
5179 cmd->cmnd[0] == INQUIRY || cmd->cmnd[0] == REQUEST_SENSE) &&
5180 !tp->nego_cp && lp) {
5141 msglen += sym_prepare_nego(np, cp, msgptr + msglen); 5181 msglen += sym_prepare_nego(np, cp, msgptr + msglen);
5142 } 5182 }
5143 5183
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
index ad078805e62b..61d28fcfffbf 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.h
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -354,6 +354,7 @@ struct sym_trans {
354 unsigned int dt:1; 354 unsigned int dt:1;
355 unsigned int qas:1; 355 unsigned int qas:1;
356 unsigned int check_nego:1; 356 unsigned int check_nego:1;
357 unsigned int renego:2;
357}; 358};
358 359
359/* 360/*
@@ -419,6 +420,9 @@ struct sym_tcb {
419 /* Transfer goal */ 420 /* Transfer goal */
420 struct sym_trans tgoal; 421 struct sym_trans tgoal;
421 422
423 /* Last printed transfer speed */
424 struct sym_trans tprint;
425
422 /* 426 /*
423 * Keep track of the CCB used for the negotiation in order 427 * Keep track of the CCB used for the negotiation in order
424 * to ensure that only 1 negotiation is queued at a time. 428 * to ensure that only 1 negotiation is queued at a time.
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index b1b947edcf01..540a2948596c 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -53,11 +53,11 @@ config SSB_B43_PCI_BRIDGE
53 53
54config SSB_PCMCIAHOST_POSSIBLE 54config SSB_PCMCIAHOST_POSSIBLE
55 bool 55 bool
56 depends on SSB && (PCMCIA = y || PCMCIA = SSB) && EXPERIMENTAL 56 depends on SSB && (PCMCIA = y || PCMCIA = SSB)
57 default y 57 default y
58 58
59config SSB_PCMCIAHOST 59config SSB_PCMCIAHOST
60 bool "Support for SSB on PCMCIA-bus host (EXPERIMENTAL)" 60 bool "Support for SSB on PCMCIA-bus host"
61 depends on SSB_PCMCIAHOST_POSSIBLE 61 depends on SSB_PCMCIAHOST_POSSIBLE
62 select SSB_SPROM 62 select SSB_SPROM
63 help 63 help
@@ -107,14 +107,14 @@ config SSB_DRIVER_PCICORE
107 If unsure, say Y 107 If unsure, say Y
108 108
109config SSB_PCICORE_HOSTMODE 109config SSB_PCICORE_HOSTMODE
110 bool "Hostmode support for SSB PCI core (EXPERIMENTAL)" 110 bool "Hostmode support for SSB PCI core"
111 depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS && EXPERIMENTAL 111 depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS
112 help 112 help
113 PCIcore hostmode operation (external PCI bus). 113 PCIcore hostmode operation (external PCI bus).
114 114
115config SSB_DRIVER_MIPS 115config SSB_DRIVER_MIPS
116 bool "SSB Broadcom MIPS core driver (EXPERIMENTAL)" 116 bool "SSB Broadcom MIPS core driver"
117 depends on SSB && MIPS && EXPERIMENTAL 117 depends on SSB && MIPS
118 select SSB_SERIAL 118 select SSB_SERIAL
119 help 119 help
120 Driver for the Sonics Silicon Backplane attached 120 Driver for the Sonics Silicon Backplane attached
@@ -129,8 +129,8 @@ config SSB_EMBEDDED
129 default y 129 default y
130 130
131config SSB_DRIVER_EXTIF 131config SSB_DRIVER_EXTIF
132 bool "SSB Broadcom EXTIF core driver (EXPERIMENTAL)" 132 bool "SSB Broadcom EXTIF core driver"
133 depends on SSB_DRIVER_MIPS && EXPERIMENTAL 133 depends on SSB_DRIVER_MIPS
134 help 134 help
135 Driver for the Sonics Silicon Backplane attached 135 Driver for the Sonics Silicon Backplane attached
136 Broadcom EXTIF core. 136 Broadcom EXTIF core.
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index 27a677584a4c..ef9c6a04ad8f 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -18,6 +18,7 @@
18 18
19static const struct pci_device_id b43_pci_bridge_tbl[] = { 19static const struct pci_device_id b43_pci_bridge_tbl[] = {
20 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4301) }, 20 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4301) },
21 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4306) },
21 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4307) }, 22 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4307) },
22 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4311) }, 23 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4311) },
23 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) }, 24 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) },
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index d48c8553539d..49aedb36dc19 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -787,7 +787,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
787 /* Did we transfer less than the minimum amount required? */ 787 /* Did we transfer less than the minimum amount required? */
788 if ((srb->result == SAM_STAT_GOOD || srb->sense_buffer[2] == 0) && 788 if ((srb->result == SAM_STAT_GOOD || srb->sense_buffer[2] == 0) &&
789 scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow) 789 scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow)
790 srb->result = (DID_ERROR << 16) | (SUGGEST_RETRY << 24); 790 srb->result = DID_ERROR << 16;
791 791
792 last_sector_hacks(us, srb); 792 last_sector_hacks(us, srb);
793 return; 793 return;
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index c295ba786edd..f0c7de78e205 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -41,8 +41,8 @@ extern struct file_system_type v9fs_fs_type;
41extern const struct address_space_operations v9fs_addr_operations; 41extern const struct address_space_operations v9fs_addr_operations;
42extern const struct file_operations v9fs_file_operations; 42extern const struct file_operations v9fs_file_operations;
43extern const struct file_operations v9fs_dir_operations; 43extern const struct file_operations v9fs_dir_operations;
44extern struct dentry_operations v9fs_dentry_operations; 44extern const struct dentry_operations v9fs_dentry_operations;
45extern struct dentry_operations v9fs_cached_dentry_operations; 45extern const struct dentry_operations v9fs_cached_dentry_operations;
46 46
47struct inode *v9fs_get_inode(struct super_block *sb, int mode); 47struct inode *v9fs_get_inode(struct super_block *sb, int mode);
48ino_t v9fs_qid2ino(struct p9_qid *qid); 48ino_t v9fs_qid2ino(struct p9_qid *qid);
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 06dcc7c4f234..d74325295b1e 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -104,12 +104,12 @@ void v9fs_dentry_release(struct dentry *dentry)
104 } 104 }
105} 105}
106 106
107struct dentry_operations v9fs_cached_dentry_operations = { 107const struct dentry_operations v9fs_cached_dentry_operations = {
108 .d_delete = v9fs_cached_dentry_delete, 108 .d_delete = v9fs_cached_dentry_delete,
109 .d_release = v9fs_dentry_release, 109 .d_release = v9fs_dentry_release,
110}; 110};
111 111
112struct dentry_operations v9fs_dentry_operations = { 112const struct dentry_operations v9fs_dentry_operations = {
113 .d_delete = v9fs_dentry_delete, 113 .d_delete = v9fs_dentry_delete,
114 .d_release = v9fs_dentry_release, 114 .d_release = v9fs_dentry_release,
115}; 115};
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 93212e40221a..5f8ab8adb5f5 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -168,8 +168,9 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
168 p9stat_free(st); 168 p9stat_free(st);
169 kfree(st); 169 kfree(st);
170 170
171P9_DPRINTK(P9_DEBUG_VFS, " return simple set mount\n"); 171P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
172 return simple_set_mnt(mnt, sb); 172 simple_set_mnt(mnt, sb);
173 return 0;
173 174
174release_sb: 175release_sb:
175 if (sb) { 176 if (sb) {
diff --git a/fs/Kconfig b/fs/Kconfig
index 93945dd0b1ae..cef8b18ceaa3 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -56,61 +56,7 @@ endif # BLOCK
56 56
57source "fs/notify/Kconfig" 57source "fs/notify/Kconfig"
58 58
59config QUOTA 59source "fs/quota/Kconfig"
60 bool "Quota support"
61 help
62 If you say Y here, you will be able to set per user limits for disk
63 usage (also called disk quotas). Currently, it works for the
64 ext2, ext3, and reiserfs file system. ext3 also supports journalled
65 quotas for which you don't need to run quotacheck(8) after an unclean
66 shutdown.
67 For further details, read the Quota mini-HOWTO, available from
68 <http://www.tldp.org/docs.html#howto>, or the documentation provided
69 with the quota tools. Probably the quota support is only useful for
70 multi user systems. If unsure, say N.
71
72config QUOTA_NETLINK_INTERFACE
73 bool "Report quota messages through netlink interface"
74 depends on QUOTA && NET
75 help
76 If you say Y here, quota warnings (about exceeding softlimit, reaching
77 hardlimit, etc.) will be reported through netlink interface. If unsure,
78 say Y.
79
80config PRINT_QUOTA_WARNING
81 bool "Print quota warnings to console (OBSOLETE)"
82 depends on QUOTA
83 default y
84 help
85 If you say Y here, quota warnings (about exceeding softlimit, reaching
86 hardlimit, etc.) will be printed to the process' controlling terminal.
87 Note that this behavior is currently deprecated and may go away in
88 future. Please use notification via netlink socket instead.
89
90# Generic support for tree structured quota files. Seleted when needed.
91config QUOTA_TREE
92 tristate
93
94config QFMT_V1
95 tristate "Old quota format support"
96 depends on QUOTA
97 help
98 This quota format was (is) used by kernels earlier than 2.4.22. If
99 you have quota working and you don't want to convert to new quota
100 format say Y here.
101
102config QFMT_V2
103 tristate "Quota format v2 support"
104 depends on QUOTA
105 select QUOTA_TREE
106 help
107 This quota format allows using quotas with 32-bit UIDs/GIDs. If you
108 need this functionality say Y here.
109
110config QUOTACTL
111 bool
112 depends on XFS_QUOTA || QUOTA
113 default y
114 60
115source "fs/autofs/Kconfig" 61source "fs/autofs/Kconfig"
116source "fs/autofs4/Kconfig" 62source "fs/autofs4/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index dc20db348679..6e82a307bcd4 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -51,11 +51,7 @@ obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o xattr_acl.o
51obj-$(CONFIG_NFS_COMMON) += nfs_common/ 51obj-$(CONFIG_NFS_COMMON) += nfs_common/
52obj-$(CONFIG_GENERIC_ACL) += generic_acl.o 52obj-$(CONFIG_GENERIC_ACL) += generic_acl.o
53 53
54obj-$(CONFIG_QUOTA) += dquot.o 54obj-y += quota/
55obj-$(CONFIG_QFMT_V1) += quota_v1.o
56obj-$(CONFIG_QFMT_V2) += quota_v2.o
57obj-$(CONFIG_QUOTA_TREE) += quota_tree.o
58obj-$(CONFIG_QUOTACTL) += quota.o
59 55
60obj-$(CONFIG_PROC_FS) += proc/ 56obj-$(CONFIG_PROC_FS) += proc/
61obj-y += partitions/ 57obj-y += partitions/
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 831157502d5a..e0a85dbeeb88 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -86,7 +86,7 @@ void __adfs_error(struct super_block *sb, const char *function,
86/* dir_*.c */ 86/* dir_*.c */
87extern const struct inode_operations adfs_dir_inode_operations; 87extern const struct inode_operations adfs_dir_inode_operations;
88extern const struct file_operations adfs_dir_operations; 88extern const struct file_operations adfs_dir_operations;
89extern struct dentry_operations adfs_dentry_operations; 89extern const struct dentry_operations adfs_dentry_operations;
90extern struct adfs_dir_ops adfs_f_dir_ops; 90extern struct adfs_dir_ops adfs_f_dir_ops;
91extern struct adfs_dir_ops adfs_fplus_dir_ops; 91extern struct adfs_dir_ops adfs_fplus_dir_ops;
92 92
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index 85a30e929800..e867ccf37246 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -263,7 +263,7 @@ adfs_compare(struct dentry *parent, struct qstr *entry, struct qstr *name)
263 return 0; 263 return 0;
264} 264}
265 265
266struct dentry_operations adfs_dentry_operations = { 266const struct dentry_operations adfs_dentry_operations = {
267 .d_hash = adfs_hash, 267 .d_hash = adfs_hash,
268 .d_compare = adfs_compare, 268 .d_compare = adfs_compare,
269}; 269};
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index e9ec915f7553..1a2d5e3c7f4e 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -199,8 +199,7 @@ extern const struct address_space_operations affs_symlink_aops;
199extern const struct address_space_operations affs_aops; 199extern const struct address_space_operations affs_aops;
200extern const struct address_space_operations affs_aops_ofs; 200extern const struct address_space_operations affs_aops_ofs;
201 201
202extern struct dentry_operations affs_dentry_operations; 202extern const struct dentry_operations affs_dentry_operations;
203extern struct dentry_operations affs_dentry_operations_intl;
204 203
205static inline void 204static inline void
206affs_set_blocksize(struct super_block *sb, int size) 205affs_set_blocksize(struct super_block *sb, int size)
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 805573005de6..7d0f0a30f7a3 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -179,14 +179,18 @@ affs_remove_link(struct dentry *dentry)
179 affs_lock_dir(dir); 179 affs_lock_dir(dir);
180 affs_fix_dcache(dentry, link_ino); 180 affs_fix_dcache(dentry, link_ino);
181 retval = affs_remove_hash(dir, link_bh); 181 retval = affs_remove_hash(dir, link_bh);
182 if (retval) 182 if (retval) {
183 affs_unlock_dir(dir);
183 goto done; 184 goto done;
185 }
184 mark_buffer_dirty_inode(link_bh, inode); 186 mark_buffer_dirty_inode(link_bh, inode);
185 187
186 memcpy(AFFS_TAIL(sb, bh)->name, AFFS_TAIL(sb, link_bh)->name, 32); 188 memcpy(AFFS_TAIL(sb, bh)->name, AFFS_TAIL(sb, link_bh)->name, 32);
187 retval = affs_insert_hash(dir, bh); 189 retval = affs_insert_hash(dir, bh);
188 if (retval) 190 if (retval) {
191 affs_unlock_dir(dir);
189 goto done; 192 goto done;
193 }
190 mark_buffer_dirty_inode(bh, inode); 194 mark_buffer_dirty_inode(bh, inode);
191 195
192 affs_unlock_dir(dir); 196 affs_unlock_dir(dir);
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index cfcf1b6cf82b..960d336ec694 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -19,12 +19,12 @@ static int affs_intl_toupper(int ch);
19static int affs_intl_hash_dentry(struct dentry *, struct qstr *); 19static int affs_intl_hash_dentry(struct dentry *, struct qstr *);
20static int affs_intl_compare_dentry(struct dentry *, struct qstr *, struct qstr *); 20static int affs_intl_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
21 21
22struct dentry_operations affs_dentry_operations = { 22const struct dentry_operations affs_dentry_operations = {
23 .d_hash = affs_hash_dentry, 23 .d_hash = affs_hash_dentry,
24 .d_compare = affs_compare_dentry, 24 .d_compare = affs_compare_dentry,
25}; 25};
26 26
27static struct dentry_operations affs_intl_dentry_operations = { 27static const struct dentry_operations affs_intl_dentry_operations = {
28 .d_hash = affs_intl_hash_dentry, 28 .d_hash = affs_intl_hash_dentry,
29 .d_compare = affs_intl_compare_dentry, 29 .d_compare = affs_intl_compare_dentry,
30}; 30};
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 99cf390641f7..9bd757774c9e 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -62,7 +62,7 @@ const struct inode_operations afs_dir_inode_operations = {
62 .setattr = afs_setattr, 62 .setattr = afs_setattr,
63}; 63};
64 64
65static struct dentry_operations afs_fs_dentry_operations = { 65static const struct dentry_operations afs_fs_dentry_operations = {
66 .d_revalidate = afs_d_revalidate, 66 .d_revalidate = afs_d_revalidate,
67 .d_delete = afs_d_delete, 67 .d_delete = afs_d_delete,
68 .d_release = afs_d_release, 68 .d_release = afs_d_release,
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 3bbdb9d02376..1dd96d4406c0 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -48,7 +48,7 @@ static struct file_system_type anon_inode_fs_type = {
48 .get_sb = anon_inodefs_get_sb, 48 .get_sb = anon_inodefs_get_sb,
49 .kill_sb = kill_anon_super, 49 .kill_sb = kill_anon_super,
50}; 50};
51static struct dentry_operations anon_inodefs_dentry_operations = { 51static const struct dentry_operations anon_inodefs_dentry_operations = {
52 .d_delete = anon_inodefs_delete_dentry, 52 .d_delete = anon_inodefs_delete_dentry,
53}; 53};
54 54
diff --git a/fs/attr.c b/fs/attr.c
index f4360192a938..9fe1b1bd30a8 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -173,7 +173,8 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
173 if (!error) { 173 if (!error) {
174 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || 174 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
175 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) 175 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
176 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; 176 error = vfs_dq_transfer(inode, attr) ?
177 -EDQUOT : 0;
177 if (!error) 178 if (!error)
178 error = inode_setattr(inode, attr); 179 error = inode_setattr(inode, attr);
179 } 180 }
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 8aacade56956..4a1401cea0a1 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -192,7 +192,7 @@ static int autofs_revalidate(struct dentry * dentry, struct nameidata *nd)
192 return 1; 192 return 1;
193} 193}
194 194
195static struct dentry_operations autofs_dentry_operations = { 195static const struct dentry_operations autofs_dentry_operations = {
196 .d_revalidate = autofs_revalidate, 196 .d_revalidate = autofs_revalidate,
197}; 197};
198 198
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 716e12b627b2..69c8142da838 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -310,7 +310,7 @@ static struct autofs_info *autofs4_mkroot(struct autofs_sb_info *sbi)
310 return ino; 310 return ino;
311} 311}
312 312
313static struct dentry_operations autofs4_sb_dentry_operations = { 313static const struct dentry_operations autofs4_sb_dentry_operations = {
314 .d_release = autofs4_dentry_release, 314 .d_release = autofs4_dentry_release,
315}; 315};
316 316
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 2a41c2a7fc52..74b1469a9504 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -349,13 +349,13 @@ void autofs4_dentry_release(struct dentry *de)
349} 349}
350 350
351/* For dentries of directories in the root dir */ 351/* For dentries of directories in the root dir */
352static struct dentry_operations autofs4_root_dentry_operations = { 352static const struct dentry_operations autofs4_root_dentry_operations = {
353 .d_revalidate = autofs4_revalidate, 353 .d_revalidate = autofs4_revalidate,
354 .d_release = autofs4_dentry_release, 354 .d_release = autofs4_dentry_release,
355}; 355};
356 356
357/* For other dentries */ 357/* For other dentries */
358static struct dentry_operations autofs4_dentry_operations = { 358static const struct dentry_operations autofs4_dentry_operations = {
359 .d_revalidate = autofs4_revalidate, 359 .d_revalidate = autofs4_revalidate,
360 .d_release = autofs4_dentry_release, 360 .d_release = autofs4_dentry_release,
361}; 361};
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b3c1efff5e1d..8c3c6899ccf3 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -18,6 +18,7 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/blkpg.h> 19#include <linux/blkpg.h>
20#include <linux/buffer_head.h> 20#include <linux/buffer_head.h>
21#include <linux/pagevec.h>
21#include <linux/writeback.h> 22#include <linux/writeback.h>
22#include <linux/mpage.h> 23#include <linux/mpage.h>
23#include <linux/mount.h> 24#include <linux/mount.h>
@@ -174,6 +175,151 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
174 iov, offset, nr_segs, blkdev_get_blocks, NULL); 175 iov, offset, nr_segs, blkdev_get_blocks, NULL);
175} 176}
176 177
178/*
179 * Write out and wait upon all the dirty data associated with a block
180 * device via its mapping. Does not take the superblock lock.
181 */
182int sync_blockdev(struct block_device *bdev)
183{
184 int ret = 0;
185
186 if (bdev)
187 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
188 return ret;
189}
190EXPORT_SYMBOL(sync_blockdev);
191
192/*
193 * Write out and wait upon all dirty data associated with this
194 * device. Filesystem data as well as the underlying block
195 * device. Takes the superblock lock.
196 */
197int fsync_bdev(struct block_device *bdev)
198{
199 struct super_block *sb = get_super(bdev);
200 if (sb) {
201 int res = fsync_super(sb);
202 drop_super(sb);
203 return res;
204 }
205 return sync_blockdev(bdev);
206}
207
208/**
209 * freeze_bdev -- lock a filesystem and force it into a consistent state
210 * @bdev: blockdevice to lock
211 *
212 * This takes the block device bd_mount_sem to make sure no new mounts
213 * happen on bdev until thaw_bdev() is called.
214 * If a superblock is found on this device, we take the s_umount semaphore
215 * on it to make sure nobody unmounts until the snapshot creation is done.
216 * The reference counter (bd_fsfreeze_count) guarantees that only the last
217 * unfreeze process can unfreeze the frozen filesystem actually when multiple
218 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
219 * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
220 * actually.
221 */
222struct super_block *freeze_bdev(struct block_device *bdev)
223{
224 struct super_block *sb;
225 int error = 0;
226
227 mutex_lock(&bdev->bd_fsfreeze_mutex);
228 if (bdev->bd_fsfreeze_count > 0) {
229 bdev->bd_fsfreeze_count++;
230 sb = get_super(bdev);
231 mutex_unlock(&bdev->bd_fsfreeze_mutex);
232 return sb;
233 }
234 bdev->bd_fsfreeze_count++;
235
236 down(&bdev->bd_mount_sem);
237 sb = get_super(bdev);
238 if (sb && !(sb->s_flags & MS_RDONLY)) {
239 sb->s_frozen = SB_FREEZE_WRITE;
240 smp_wmb();
241
242 __fsync_super(sb);
243
244 sb->s_frozen = SB_FREEZE_TRANS;
245 smp_wmb();
246
247 sync_blockdev(sb->s_bdev);
248
249 if (sb->s_op->freeze_fs) {
250 error = sb->s_op->freeze_fs(sb);
251 if (error) {
252 printk(KERN_ERR
253 "VFS:Filesystem freeze failed\n");
254 sb->s_frozen = SB_UNFROZEN;
255 drop_super(sb);
256 up(&bdev->bd_mount_sem);
257 bdev->bd_fsfreeze_count--;
258 mutex_unlock(&bdev->bd_fsfreeze_mutex);
259 return ERR_PTR(error);
260 }
261 }
262 }
263
264 sync_blockdev(bdev);
265 mutex_unlock(&bdev->bd_fsfreeze_mutex);
266
267 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
268}
269EXPORT_SYMBOL(freeze_bdev);
270
271/**
272 * thaw_bdev -- unlock filesystem
273 * @bdev: blockdevice to unlock
274 * @sb: associated superblock
275 *
276 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
277 */
278int thaw_bdev(struct block_device *bdev, struct super_block *sb)
279{
280 int error = 0;
281
282 mutex_lock(&bdev->bd_fsfreeze_mutex);
283 if (!bdev->bd_fsfreeze_count) {
284 mutex_unlock(&bdev->bd_fsfreeze_mutex);
285 return -EINVAL;
286 }
287
288 bdev->bd_fsfreeze_count--;
289 if (bdev->bd_fsfreeze_count > 0) {
290 if (sb)
291 drop_super(sb);
292 mutex_unlock(&bdev->bd_fsfreeze_mutex);
293 return 0;
294 }
295
296 if (sb) {
297 BUG_ON(sb->s_bdev != bdev);
298 if (!(sb->s_flags & MS_RDONLY)) {
299 if (sb->s_op->unfreeze_fs) {
300 error = sb->s_op->unfreeze_fs(sb);
301 if (error) {
302 printk(KERN_ERR
303 "VFS:Filesystem thaw failed\n");
304 sb->s_frozen = SB_FREEZE_TRANS;
305 bdev->bd_fsfreeze_count++;
306 mutex_unlock(&bdev->bd_fsfreeze_mutex);
307 return error;
308 }
309 }
310 sb->s_frozen = SB_UNFROZEN;
311 smp_wmb();
312 wake_up(&sb->s_wait_unfrozen);
313 }
314 drop_super(sb);
315 }
316
317 up(&bdev->bd_mount_sem);
318 mutex_unlock(&bdev->bd_fsfreeze_mutex);
319 return 0;
320}
321EXPORT_SYMBOL(thaw_bdev);
322
177static int blkdev_writepage(struct page *page, struct writeback_control *wbc) 323static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
178{ 324{
179 return block_write_full_page(page, blkdev_get_block, wbc); 325 return block_write_full_page(page, blkdev_get_block, wbc);
diff --git a/fs/buffer.c b/fs/buffer.c
index 891e1c78e4f1..a2fd743d97cb 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -166,151 +166,6 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
166} 166}
167 167
168/* 168/*
169 * Write out and wait upon all the dirty data associated with a block
170 * device via its mapping. Does not take the superblock lock.
171 */
172int sync_blockdev(struct block_device *bdev)
173{
174 int ret = 0;
175
176 if (bdev)
177 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
178 return ret;
179}
180EXPORT_SYMBOL(sync_blockdev);
181
182/*
183 * Write out and wait upon all dirty data associated with this
184 * device. Filesystem data as well as the underlying block
185 * device. Takes the superblock lock.
186 */
187int fsync_bdev(struct block_device *bdev)
188{
189 struct super_block *sb = get_super(bdev);
190 if (sb) {
191 int res = fsync_super(sb);
192 drop_super(sb);
193 return res;
194 }
195 return sync_blockdev(bdev);
196}
197
198/**
199 * freeze_bdev -- lock a filesystem and force it into a consistent state
200 * @bdev: blockdevice to lock
201 *
202 * This takes the block device bd_mount_sem to make sure no new mounts
203 * happen on bdev until thaw_bdev() is called.
204 * If a superblock is found on this device, we take the s_umount semaphore
205 * on it to make sure nobody unmounts until the snapshot creation is done.
206 * The reference counter (bd_fsfreeze_count) guarantees that only the last
207 * unfreeze process can unfreeze the frozen filesystem actually when multiple
208 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
209 * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
210 * actually.
211 */
212struct super_block *freeze_bdev(struct block_device *bdev)
213{
214 struct super_block *sb;
215 int error = 0;
216
217 mutex_lock(&bdev->bd_fsfreeze_mutex);
218 if (bdev->bd_fsfreeze_count > 0) {
219 bdev->bd_fsfreeze_count++;
220 sb = get_super(bdev);
221 mutex_unlock(&bdev->bd_fsfreeze_mutex);
222 return sb;
223 }
224 bdev->bd_fsfreeze_count++;
225
226 down(&bdev->bd_mount_sem);
227 sb = get_super(bdev);
228 if (sb && !(sb->s_flags & MS_RDONLY)) {
229 sb->s_frozen = SB_FREEZE_WRITE;
230 smp_wmb();
231
232 __fsync_super(sb);
233
234 sb->s_frozen = SB_FREEZE_TRANS;
235 smp_wmb();
236
237 sync_blockdev(sb->s_bdev);
238
239 if (sb->s_op->freeze_fs) {
240 error = sb->s_op->freeze_fs(sb);
241 if (error) {
242 printk(KERN_ERR
243 "VFS:Filesystem freeze failed\n");
244 sb->s_frozen = SB_UNFROZEN;
245 drop_super(sb);
246 up(&bdev->bd_mount_sem);
247 bdev->bd_fsfreeze_count--;
248 mutex_unlock(&bdev->bd_fsfreeze_mutex);
249 return ERR_PTR(error);
250 }
251 }
252 }
253
254 sync_blockdev(bdev);
255 mutex_unlock(&bdev->bd_fsfreeze_mutex);
256
257 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
258}
259EXPORT_SYMBOL(freeze_bdev);
260
261/**
262 * thaw_bdev -- unlock filesystem
263 * @bdev: blockdevice to unlock
264 * @sb: associated superblock
265 *
266 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
267 */
268int thaw_bdev(struct block_device *bdev, struct super_block *sb)
269{
270 int error = 0;
271
272 mutex_lock(&bdev->bd_fsfreeze_mutex);
273 if (!bdev->bd_fsfreeze_count) {
274 mutex_unlock(&bdev->bd_fsfreeze_mutex);
275 return -EINVAL;
276 }
277
278 bdev->bd_fsfreeze_count--;
279 if (bdev->bd_fsfreeze_count > 0) {
280 if (sb)
281 drop_super(sb);
282 mutex_unlock(&bdev->bd_fsfreeze_mutex);
283 return 0;
284 }
285
286 if (sb) {
287 BUG_ON(sb->s_bdev != bdev);
288 if (!(sb->s_flags & MS_RDONLY)) {
289 if (sb->s_op->unfreeze_fs) {
290 error = sb->s_op->unfreeze_fs(sb);
291 if (error) {
292 printk(KERN_ERR
293 "VFS:Filesystem thaw failed\n");
294 sb->s_frozen = SB_FREEZE_TRANS;
295 bdev->bd_fsfreeze_count++;
296 mutex_unlock(&bdev->bd_fsfreeze_mutex);
297 return error;
298 }
299 }
300 sb->s_frozen = SB_UNFROZEN;
301 smp_wmb();
302 wake_up(&sb->s_wait_unfrozen);
303 }
304 drop_super(sb);
305 }
306
307 up(&bdev->bd_mount_sem);
308 mutex_unlock(&bdev->bd_fsfreeze_mutex);
309 return 0;
310}
311EXPORT_SYMBOL(thaw_bdev);
312
313/*
314 * Various filesystems appear to want __find_get_block to be non-blocking. 169 * Various filesystems appear to want __find_get_block to be non-blocking.
315 * But it's the page lock which protects the buffers. To get around this, 170 * But it's the page lock which protects the buffers. To get around this,
316 * we get exclusion from try_to_free_buffers with the blockdev mapping's 171 * we get exclusion from try_to_free_buffers with the blockdev mapping's
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 13ea53251dcf..38491fd3871d 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -606,7 +606,8 @@ cifs_get_sb(struct file_system_type *fs_type,
606 return rc; 606 return rc;
607 } 607 }
608 sb->s_flags |= MS_ACTIVE; 608 sb->s_flags |= MS_ACTIVE;
609 return simple_set_mnt(mnt, sb); 609 simple_set_mnt(mnt, sb);
610 return 0;
610} 611}
611 612
612static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, 613static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 2b1d28a9ee28..77e190dc2883 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -78,8 +78,8 @@ extern int cifs_dir_open(struct inode *inode, struct file *file);
78extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir); 78extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir);
79 79
80/* Functions related to dir entries */ 80/* Functions related to dir entries */
81extern struct dentry_operations cifs_dentry_ops; 81extern const struct dentry_operations cifs_dentry_ops;
82extern struct dentry_operations cifs_ci_dentry_ops; 82extern const struct dentry_operations cifs_ci_dentry_ops;
83 83
84/* Functions related to symlinks */ 84/* Functions related to symlinks */
85extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd); 85extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index f9b6f68be976..2f35cccfcd8d 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -701,7 +701,7 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
701 return rc; 701 return rc;
702} */ 702} */
703 703
704struct dentry_operations cifs_dentry_ops = { 704const struct dentry_operations cifs_dentry_ops = {
705 .d_revalidate = cifs_d_revalidate, 705 .d_revalidate = cifs_d_revalidate,
706/* d_delete: cifs_d_delete, */ /* not needed except for debugging */ 706/* d_delete: cifs_d_delete, */ /* not needed except for debugging */
707}; 707};
@@ -739,7 +739,7 @@ static int cifs_ci_compare(struct dentry *dentry, struct qstr *a,
739 return 1; 739 return 1;
740} 740}
741 741
742struct dentry_operations cifs_ci_dentry_ops = { 742const struct dentry_operations cifs_ci_dentry_ops = {
743 .d_revalidate = cifs_d_revalidate, 743 .d_revalidate = cifs_d_revalidate,
744 .d_hash = cifs_ci_hash, 744 .d_hash = cifs_ci_hash,
745 .d_compare = cifs_ci_compare, 745 .d_compare = cifs_ci_compare,
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 75b1fa90b2cb..4bb9d0a5decc 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -59,7 +59,7 @@ static int coda_return_EIO(void)
59} 59}
60#define CODA_EIO_ERROR ((void *) (coda_return_EIO)) 60#define CODA_EIO_ERROR ((void *) (coda_return_EIO))
61 61
62static struct dentry_operations coda_dentry_operations = 62static const struct dentry_operations coda_dentry_operations =
63{ 63{
64 .d_revalidate = coda_dentry_revalidate, 64 .d_revalidate = coda_dentry_revalidate,
65 .d_delete = coda_dentry_delete, 65 .d_delete = coda_dentry_delete,
diff --git a/fs/compat.c b/fs/compat.c
index 0949b43794a4..5e374aad33f7 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -378,6 +378,34 @@ out:
378 return error; 378 return error;
379} 379}
380 380
381/*
382 * This is a copy of sys_ustat, just dealing with a structure layout.
383 * Given how simple this syscall is that apporach is more maintainable
384 * than the various conversion hacks.
385 */
386asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u)
387{
388 struct super_block *sb;
389 struct compat_ustat tmp;
390 struct kstatfs sbuf;
391 int err;
392
393 sb = user_get_super(new_decode_dev(dev));
394 if (!sb)
395 return -EINVAL;
396 err = vfs_statfs(sb->s_root, &sbuf);
397 drop_super(sb);
398 if (err)
399 return err;
400
401 memset(&tmp, 0, sizeof(struct compat_ustat));
402 tmp.f_tfree = sbuf.f_bfree;
403 tmp.f_tinode = sbuf.f_ffree;
404 if (copy_to_user(u, &tmp, sizeof(struct compat_ustat)))
405 return -EFAULT;
406 return 0;
407}
408
381static int get_compat_flock(struct flock *kfl, struct compat_flock __user *ufl) 409static int get_compat_flock(struct flock *kfl, struct compat_flock __user *ufl)
382{ 410{
383 if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) || 411 if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) ||
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 8e93341f3e82..05373db21a4e 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -72,7 +72,7 @@ static int configfs_d_delete(struct dentry *dentry)
72 return 1; 72 return 1;
73} 73}
74 74
75static struct dentry_operations configfs_dentry_ops = { 75static const struct dentry_operations configfs_dentry_ops = {
76 .d_iput = configfs_d_iput, 76 .d_iput = configfs_d_iput,
77 /* simple_delete_dentry() isn't exported */ 77 /* simple_delete_dentry() isn't exported */
78 .d_delete = configfs_d_delete, 78 .d_delete = configfs_d_delete,
diff --git a/fs/dcache.c b/fs/dcache.c
index 07e2d4a44bda..90bbd7e1b116 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1247,15 +1247,18 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1247 struct dentry *found; 1247 struct dentry *found;
1248 struct dentry *new; 1248 struct dentry *new;
1249 1249
1250 /* Does a dentry matching the name exist already? */ 1250 /*
1251 * First check if a dentry matching the name already exists,
1252 * if not go ahead and create it now.
1253 */
1251 found = d_hash_and_lookup(dentry->d_parent, name); 1254 found = d_hash_and_lookup(dentry->d_parent, name);
1252 /* If not, create it now and return */
1253 if (!found) { 1255 if (!found) {
1254 new = d_alloc(dentry->d_parent, name); 1256 new = d_alloc(dentry->d_parent, name);
1255 if (!new) { 1257 if (!new) {
1256 error = -ENOMEM; 1258 error = -ENOMEM;
1257 goto err_out; 1259 goto err_out;
1258 } 1260 }
1261
1259 found = d_splice_alias(inode, new); 1262 found = d_splice_alias(inode, new);
1260 if (found) { 1263 if (found) {
1261 dput(new); 1264 dput(new);
@@ -1263,61 +1266,46 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1263 } 1266 }
1264 return new; 1267 return new;
1265 } 1268 }
1266 /* Matching dentry exists, check if it is negative. */ 1269
1270 /*
1271 * If a matching dentry exists, and it's not negative use it.
1272 *
1273 * Decrement the reference count to balance the iget() done
1274 * earlier on.
1275 */
1267 if (found->d_inode) { 1276 if (found->d_inode) {
1268 if (unlikely(found->d_inode != inode)) { 1277 if (unlikely(found->d_inode != inode)) {
1269 /* This can't happen because bad inodes are unhashed. */ 1278 /* This can't happen because bad inodes are unhashed. */
1270 BUG_ON(!is_bad_inode(inode)); 1279 BUG_ON(!is_bad_inode(inode));
1271 BUG_ON(!is_bad_inode(found->d_inode)); 1280 BUG_ON(!is_bad_inode(found->d_inode));
1272 } 1281 }
1273 /*
1274 * Already have the inode and the dentry attached, decrement
1275 * the reference count to balance the iget() done
1276 * earlier on. We found the dentry using d_lookup() so it
1277 * cannot be disconnected and thus we do not need to worry
1278 * about any NFS/disconnectedness issues here.
1279 */
1280 iput(inode); 1282 iput(inode);
1281 return found; 1283 return found;
1282 } 1284 }
1285
1283 /* 1286 /*
1284 * Negative dentry: instantiate it unless the inode is a directory and 1287 * Negative dentry: instantiate it unless the inode is a directory and
1285 * has a 'disconnected' dentry (i.e. IS_ROOT and DCACHE_DISCONNECTED), 1288 * already has a dentry.
1286 * in which case d_move() that in place of the found dentry.
1287 */ 1289 */
1288 if (!S_ISDIR(inode->i_mode)) {
1289 /* Not a directory; everything is easy. */
1290 d_instantiate(found, inode);
1291 return found;
1292 }
1293 spin_lock(&dcache_lock); 1290 spin_lock(&dcache_lock);
1294 if (list_empty(&inode->i_dentry)) { 1291 if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
1295 /*
1296 * Directory without a 'disconnected' dentry; we need to do
1297 * d_instantiate() by hand because it takes dcache_lock which
1298 * we already hold.
1299 */
1300 __d_instantiate(found, inode); 1292 __d_instantiate(found, inode);
1301 spin_unlock(&dcache_lock); 1293 spin_unlock(&dcache_lock);
1302 security_d_instantiate(found, inode); 1294 security_d_instantiate(found, inode);
1303 return found; 1295 return found;
1304 } 1296 }
1297
1305 /* 1298 /*
1306 * Directory with a 'disconnected' dentry; get a reference to the 1299 * In case a directory already has a (disconnected) entry grab a
1307 * 'disconnected' dentry. 1300 * reference to it, move it in place and use it.
1308 */ 1301 */
1309 new = list_entry(inode->i_dentry.next, struct dentry, d_alias); 1302 new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
1310 dget_locked(new); 1303 dget_locked(new);
1311 spin_unlock(&dcache_lock); 1304 spin_unlock(&dcache_lock);
1312 /* Do security vodoo. */
1313 security_d_instantiate(found, inode); 1305 security_d_instantiate(found, inode);
1314 /* Move new in place of found. */
1315 d_move(new, found); 1306 d_move(new, found);
1316 /* Balance the iget() we did above. */
1317 iput(inode); 1307 iput(inode);
1318 /* Throw away found. */
1319 dput(found); 1308 dput(found);
1320 /* Use new as the actual dentry. */
1321 return new; 1309 return new;
1322 1310
1323err_out: 1311err_out:
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index bff4052b05e7..63a4a59e4148 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -322,177 +322,81 @@ static int compare_init_pts_sb(struct super_block *s, void *p)
322} 322}
323 323
324/* 324/*
325 * Safely parse the mount options in @data and update @opts. 325 * devpts_get_sb()
326 * 326 *
327 * devpts ends up parsing options two times during mount, due to the 327 * If the '-o newinstance' mount option was specified, mount a new
328 * two modes of operation it supports. The first parse occurs in 328 * (private) instance of devpts. PTYs created in this instance are
329 * devpts_get_sb() when determining the mode (single-instance or 329 * independent of the PTYs in other devpts instances.
330 * multi-instance mode). The second parse happens in devpts_remount()
331 * or new_pts_mount() depending on the mode.
332 * 330 *
333 * Parsing of options modifies the @data making subsequent parsing 331 * If the '-o newinstance' option was not specified, mount/remount the
334 * incorrect. So make a local copy of @data and parse it. 332 * initial kernel mount of devpts. This type of mount gives the
333 * legacy, single-instance semantics.
335 * 334 *
336 * Return: 0 On success, -errno on error 335 * The 'newinstance' option is needed to support multiple namespace
337 */ 336 * semantics in devpts while preserving backward compatibility of the
338static int safe_parse_mount_options(void *data, struct pts_mount_opts *opts) 337 * current 'single-namespace' semantics. i.e all mounts of devpts
339{ 338 * without the 'newinstance' mount option should bind to the initial
340 int rc; 339 * kernel mount, like get_sb_single().
341 void *datacp;
342
343 if (!data)
344 return 0;
345
346 /* Use kstrdup() ? */
347 datacp = kmalloc(PAGE_SIZE, GFP_KERNEL);
348 if (!datacp)
349 return -ENOMEM;
350
351 memcpy(datacp, data, PAGE_SIZE);
352 rc = parse_mount_options((char *)datacp, PARSE_MOUNT, opts);
353 kfree(datacp);
354
355 return rc;
356}
357
358/*
359 * Mount a new (private) instance of devpts. PTYs created in this
360 * instance are independent of the PTYs in other devpts instances.
361 */
362static int new_pts_mount(struct file_system_type *fs_type, int flags,
363 void *data, struct vfsmount *mnt)
364{
365 int err;
366 struct pts_fs_info *fsi;
367 struct pts_mount_opts *opts;
368
369 err = get_sb_nodev(fs_type, flags, data, devpts_fill_super, mnt);
370 if (err)
371 return err;
372
373 fsi = DEVPTS_SB(mnt->mnt_sb);
374 opts = &fsi->mount_opts;
375
376 err = parse_mount_options(data, PARSE_MOUNT, opts);
377 if (err)
378 goto fail;
379
380 err = mknod_ptmx(mnt->mnt_sb);
381 if (err)
382 goto fail;
383
384 return 0;
385
386fail:
387 dput(mnt->mnt_sb->s_root);
388 deactivate_super(mnt->mnt_sb);
389 return err;
390}
391
392/*
393 * Check if 'newinstance' mount option was specified in @data.
394 * 340 *
395 * Return: -errno on error (eg: invalid mount options specified) 341 * Mounts with 'newinstance' option create a new, private namespace.
396 * : 1 if 'newinstance' mount option was specified
397 * : 0 if 'newinstance' mount option was NOT specified
398 */
399static int is_new_instance_mount(void *data)
400{
401 int rc;
402 struct pts_mount_opts opts;
403
404 if (!data)
405 return 0;
406
407 rc = safe_parse_mount_options(data, &opts);
408 if (!rc)
409 rc = opts.newinstance;
410
411 return rc;
412}
413
414/*
415 * get_init_pts_sb()
416 *
417 * This interface is needed to support multiple namespace semantics in
418 * devpts while preserving backward compatibility of the current 'single-
419 * namespace' semantics. i.e all mounts of devpts without the 'newinstance'
420 * mount option should bind to the initial kernel mount, like
421 * get_sb_single().
422 * 342 *
423 * Mounts with 'newinstance' option create a new private namespace. 343 * NOTE:
424 * 344 *
425 * But for single-mount semantics, devpts cannot use get_sb_single(), 345 * For single-mount semantics, devpts cannot use get_sb_single(),
426 * because get_sb_single()/sget() find and use the super-block from 346 * because get_sb_single()/sget() find and use the super-block from
427 * the most recent mount of devpts. But that recent mount may be a 347 * the most recent mount of devpts. But that recent mount may be a
428 * 'newinstance' mount and get_sb_single() would pick the newinstance 348 * 'newinstance' mount and get_sb_single() would pick the newinstance
429 * super-block instead of the initial super-block. 349 * super-block instead of the initial super-block.
430 *
431 * This interface is identical to get_sb_single() except that it
432 * consistently selects the 'single-namespace' superblock even in the
433 * presence of the private namespace (i.e 'newinstance') super-blocks.
434 */ 350 */
435static int get_init_pts_sb(struct file_system_type *fs_type, int flags, 351static int devpts_get_sb(struct file_system_type *fs_type,
436 void *data, struct vfsmount *mnt) 352 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
437{ 353{
438 struct super_block *s;
439 int error; 354 int error;
355 struct pts_mount_opts opts;
356 struct super_block *s;
357
358 memset(&opts, 0, sizeof(opts));
359 if (data) {
360 error = parse_mount_options(data, PARSE_MOUNT, &opts);
361 if (error)
362 return error;
363 }
364
365 if (opts.newinstance)
366 s = sget(fs_type, NULL, set_anon_super, NULL);
367 else
368 s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL);
440 369
441 s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL);
442 if (IS_ERR(s)) 370 if (IS_ERR(s))
443 return PTR_ERR(s); 371 return PTR_ERR(s);
444 372
445 if (!s->s_root) { 373 if (!s->s_root) {
446 s->s_flags = flags; 374 s->s_flags = flags;
447 error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0); 375 error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
448 if (error) { 376 if (error)
449 up_write(&s->s_umount); 377 goto out_undo_sget;
450 deactivate_super(s);
451 return error;
452 }
453 s->s_flags |= MS_ACTIVE; 378 s->s_flags |= MS_ACTIVE;
454 } 379 }
455 do_remount_sb(s, flags, data, 0);
456 return simple_set_mnt(mnt, s);
457}
458 380
459/* 381 simple_set_mnt(mnt, s);
460 * Mount or remount the initial kernel mount of devpts. This type of
461 * mount maintains the legacy, single-instance semantics, while the
462 * kernel still allows multiple-instances.
463 */
464static int init_pts_mount(struct file_system_type *fs_type, int flags,
465 void *data, struct vfsmount *mnt)
466{
467 int err;
468 382
469 err = get_init_pts_sb(fs_type, flags, data, mnt); 383 memcpy(&(DEVPTS_SB(s))->mount_opts, &opts, sizeof(opts));
470 if (err)
471 return err;
472 384
473 err = mknod_ptmx(mnt->mnt_sb); 385 error = mknod_ptmx(s);
474 if (err) { 386 if (error)
475 dput(mnt->mnt_sb->s_root); 387 goto out_dput;
476 deactivate_super(mnt->mnt_sb);
477 }
478 388
479 return err; 389 return 0;
480}
481
482static int devpts_get_sb(struct file_system_type *fs_type,
483 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
484{
485 int new;
486
487 new = is_new_instance_mount(data);
488 if (new < 0)
489 return new;
490 390
491 if (new) 391out_dput:
492 return new_pts_mount(fs_type, flags, data, mnt); 392 dput(s->s_root);
493 393
494 return init_pts_mount(fs_type, flags, data, mnt); 394out_undo_sget:
395 up_write(&s->s_umount);
396 deactivate_super(s);
397 return error;
495} 398}
399
496#else 400#else
497/* 401/*
498 * This supports only the legacy single-instance semantics (no 402 * This supports only the legacy single-instance semantics (no
diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
index 92969f879a17..858fba14aaa6 100644
--- a/fs/dlm/dir.c
+++ b/fs/dlm/dir.c
@@ -156,7 +156,7 @@ void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen
156 156
157 bucket = dir_hash(ls, name, namelen); 157 bucket = dir_hash(ls, name, namelen);
158 158
159 write_lock(&ls->ls_dirtbl[bucket].lock); 159 spin_lock(&ls->ls_dirtbl[bucket].lock);
160 160
161 de = search_bucket(ls, name, namelen, bucket); 161 de = search_bucket(ls, name, namelen, bucket);
162 162
@@ -173,7 +173,7 @@ void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen
173 list_del(&de->list); 173 list_del(&de->list);
174 kfree(de); 174 kfree(de);
175 out: 175 out:
176 write_unlock(&ls->ls_dirtbl[bucket].lock); 176 spin_unlock(&ls->ls_dirtbl[bucket].lock);
177} 177}
178 178
179void dlm_dir_clear(struct dlm_ls *ls) 179void dlm_dir_clear(struct dlm_ls *ls)
@@ -185,14 +185,14 @@ void dlm_dir_clear(struct dlm_ls *ls)
185 DLM_ASSERT(list_empty(&ls->ls_recover_list), ); 185 DLM_ASSERT(list_empty(&ls->ls_recover_list), );
186 186
187 for (i = 0; i < ls->ls_dirtbl_size; i++) { 187 for (i = 0; i < ls->ls_dirtbl_size; i++) {
188 write_lock(&ls->ls_dirtbl[i].lock); 188 spin_lock(&ls->ls_dirtbl[i].lock);
189 head = &ls->ls_dirtbl[i].list; 189 head = &ls->ls_dirtbl[i].list;
190 while (!list_empty(head)) { 190 while (!list_empty(head)) {
191 de = list_entry(head->next, struct dlm_direntry, list); 191 de = list_entry(head->next, struct dlm_direntry, list);
192 list_del(&de->list); 192 list_del(&de->list);
193 put_free_de(ls, de); 193 put_free_de(ls, de);
194 } 194 }
195 write_unlock(&ls->ls_dirtbl[i].lock); 195 spin_unlock(&ls->ls_dirtbl[i].lock);
196 } 196 }
197} 197}
198 198
@@ -307,17 +307,17 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
307 307
308 bucket = dir_hash(ls, name, namelen); 308 bucket = dir_hash(ls, name, namelen);
309 309
310 write_lock(&ls->ls_dirtbl[bucket].lock); 310 spin_lock(&ls->ls_dirtbl[bucket].lock);
311 de = search_bucket(ls, name, namelen, bucket); 311 de = search_bucket(ls, name, namelen, bucket);
312 if (de) { 312 if (de) {
313 *r_nodeid = de->master_nodeid; 313 *r_nodeid = de->master_nodeid;
314 write_unlock(&ls->ls_dirtbl[bucket].lock); 314 spin_unlock(&ls->ls_dirtbl[bucket].lock);
315 if (*r_nodeid == nodeid) 315 if (*r_nodeid == nodeid)
316 return -EEXIST; 316 return -EEXIST;
317 return 0; 317 return 0;
318 } 318 }
319 319
320 write_unlock(&ls->ls_dirtbl[bucket].lock); 320 spin_unlock(&ls->ls_dirtbl[bucket].lock);
321 321
322 if (namelen > DLM_RESNAME_MAXLEN) 322 if (namelen > DLM_RESNAME_MAXLEN)
323 return -EINVAL; 323 return -EINVAL;
@@ -330,7 +330,7 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
330 de->length = namelen; 330 de->length = namelen;
331 memcpy(de->name, name, namelen); 331 memcpy(de->name, name, namelen);
332 332
333 write_lock(&ls->ls_dirtbl[bucket].lock); 333 spin_lock(&ls->ls_dirtbl[bucket].lock);
334 tmp = search_bucket(ls, name, namelen, bucket); 334 tmp = search_bucket(ls, name, namelen, bucket);
335 if (tmp) { 335 if (tmp) {
336 kfree(de); 336 kfree(de);
@@ -339,7 +339,7 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
339 list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list); 339 list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
340 } 340 }
341 *r_nodeid = de->master_nodeid; 341 *r_nodeid = de->master_nodeid;
342 write_unlock(&ls->ls_dirtbl[bucket].lock); 342 spin_unlock(&ls->ls_dirtbl[bucket].lock);
343 return 0; 343 return 0;
344} 344}
345 345
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 076e86f38bc8..d01ca0a711db 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -99,7 +99,7 @@ struct dlm_direntry {
99 99
100struct dlm_dirtable { 100struct dlm_dirtable {
101 struct list_head list; 101 struct list_head list;
102 rwlock_t lock; 102 spinlock_t lock;
103}; 103};
104 104
105struct dlm_rsbtable { 105struct dlm_rsbtable {
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 01e7d39c5fba..205ec95b347e 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -835,7 +835,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
835 lkb->lkb_wait_count++; 835 lkb->lkb_wait_count++;
836 hold_lkb(lkb); 836 hold_lkb(lkb);
837 837
838 log_debug(ls, "add overlap %x cur %d new %d count %d flags %x", 838 log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
839 lkb->lkb_id, lkb->lkb_wait_type, mstype, 839 lkb->lkb_id, lkb->lkb_wait_type, mstype,
840 lkb->lkb_wait_count, lkb->lkb_flags); 840 lkb->lkb_wait_count, lkb->lkb_flags);
841 goto out; 841 goto out;
@@ -851,7 +851,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
851 list_add(&lkb->lkb_wait_reply, &ls->ls_waiters); 851 list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
852 out: 852 out:
853 if (error) 853 if (error)
854 log_error(ls, "add_to_waiters %x error %d flags %x %d %d %s", 854 log_error(ls, "addwait error %x %d flags %x %d %d %s",
855 lkb->lkb_id, error, lkb->lkb_flags, mstype, 855 lkb->lkb_id, error, lkb->lkb_flags, mstype,
856 lkb->lkb_wait_type, lkb->lkb_resource->res_name); 856 lkb->lkb_wait_type, lkb->lkb_resource->res_name);
857 mutex_unlock(&ls->ls_waiters_mutex); 857 mutex_unlock(&ls->ls_waiters_mutex);
@@ -863,23 +863,55 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
863 request reply on the requestqueue) between dlm_recover_waiters_pre() which 863 request reply on the requestqueue) between dlm_recover_waiters_pre() which
864 set RESEND and dlm_recover_waiters_post() */ 864 set RESEND and dlm_recover_waiters_post() */
865 865
866static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype) 866static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
867 struct dlm_message *ms)
867{ 868{
868 struct dlm_ls *ls = lkb->lkb_resource->res_ls; 869 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
869 int overlap_done = 0; 870 int overlap_done = 0;
870 871
871 if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) { 872 if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
873 log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
872 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK; 874 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
873 overlap_done = 1; 875 overlap_done = 1;
874 goto out_del; 876 goto out_del;
875 } 877 }
876 878
877 if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) { 879 if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
880 log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
878 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; 881 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
879 overlap_done = 1; 882 overlap_done = 1;
880 goto out_del; 883 goto out_del;
881 } 884 }
882 885
886 /* Cancel state was preemptively cleared by a successful convert,
887 see next comment, nothing to do. */
888
889 if ((mstype == DLM_MSG_CANCEL_REPLY) &&
890 (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
891 log_debug(ls, "remwait %x cancel_reply wait_type %d",
892 lkb->lkb_id, lkb->lkb_wait_type);
893 return -1;
894 }
895
896 /* Remove for the convert reply, and premptively remove for the
897 cancel reply. A convert has been granted while there's still
898 an outstanding cancel on it (the cancel is moot and the result
899 in the cancel reply should be 0). We preempt the cancel reply
900 because the app gets the convert result and then can follow up
901 with another op, like convert. This subsequent op would see the
902 lingering state of the cancel and fail with -EBUSY. */
903
904 if ((mstype == DLM_MSG_CONVERT_REPLY) &&
905 (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
906 is_overlap_cancel(lkb) && ms && !ms->m_result) {
907 log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
908 lkb->lkb_id);
909 lkb->lkb_wait_type = 0;
910 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
911 lkb->lkb_wait_count--;
912 goto out_del;
913 }
914
883 /* N.B. type of reply may not always correspond to type of original 915 /* N.B. type of reply may not always correspond to type of original
884 msg due to lookup->request optimization, verify others? */ 916 msg due to lookup->request optimization, verify others? */
885 917
@@ -888,8 +920,8 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype)
888 goto out_del; 920 goto out_del;
889 } 921 }
890 922
891 log_error(ls, "remove_from_waiters lkid %x flags %x types %d %d", 923 log_error(ls, "remwait error %x reply %d flags %x no wait_type",
892 lkb->lkb_id, lkb->lkb_flags, mstype, lkb->lkb_wait_type); 924 lkb->lkb_id, mstype, lkb->lkb_flags);
893 return -1; 925 return -1;
894 926
895 out_del: 927 out_del:
@@ -899,7 +931,7 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype)
899 this would happen */ 931 this would happen */
900 932
901 if (overlap_done && lkb->lkb_wait_type) { 933 if (overlap_done && lkb->lkb_wait_type) {
902 log_error(ls, "remove_from_waiters %x reply %d give up on %d", 934 log_error(ls, "remwait error %x reply %d wait_type %d overlap",
903 lkb->lkb_id, mstype, lkb->lkb_wait_type); 935 lkb->lkb_id, mstype, lkb->lkb_wait_type);
904 lkb->lkb_wait_count--; 936 lkb->lkb_wait_count--;
905 lkb->lkb_wait_type = 0; 937 lkb->lkb_wait_type = 0;
@@ -921,7 +953,7 @@ static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
921 int error; 953 int error;
922 954
923 mutex_lock(&ls->ls_waiters_mutex); 955 mutex_lock(&ls->ls_waiters_mutex);
924 error = _remove_from_waiters(lkb, mstype); 956 error = _remove_from_waiters(lkb, mstype, NULL);
925 mutex_unlock(&ls->ls_waiters_mutex); 957 mutex_unlock(&ls->ls_waiters_mutex);
926 return error; 958 return error;
927} 959}
@@ -936,7 +968,7 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
936 968
937 if (ms != &ls->ls_stub_ms) 969 if (ms != &ls->ls_stub_ms)
938 mutex_lock(&ls->ls_waiters_mutex); 970 mutex_lock(&ls->ls_waiters_mutex);
939 error = _remove_from_waiters(lkb, ms->m_type); 971 error = _remove_from_waiters(lkb, ms->m_type, ms);
940 if (ms != &ls->ls_stub_ms) 972 if (ms != &ls->ls_stub_ms)
941 mutex_unlock(&ls->ls_waiters_mutex); 973 mutex_unlock(&ls->ls_waiters_mutex);
942 return error; 974 return error;
@@ -2083,6 +2115,11 @@ static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2083 lkb->lkb_timeout_cs = args->timeout; 2115 lkb->lkb_timeout_cs = args->timeout;
2084 rv = 0; 2116 rv = 0;
2085 out: 2117 out:
2118 if (rv)
2119 log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
2120 rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2121 lkb->lkb_status, lkb->lkb_wait_type,
2122 lkb->lkb_resource->res_name);
2086 return rv; 2123 return rv;
2087} 2124}
2088 2125
@@ -2149,6 +2186,13 @@ static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2149 goto out; 2186 goto out;
2150 } 2187 }
2151 2188
2189 /* there's nothing to cancel */
2190 if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2191 !lkb->lkb_wait_type) {
2192 rv = -EBUSY;
2193 goto out;
2194 }
2195
2152 switch (lkb->lkb_wait_type) { 2196 switch (lkb->lkb_wait_type) {
2153 case DLM_MSG_LOOKUP: 2197 case DLM_MSG_LOOKUP:
2154 case DLM_MSG_REQUEST: 2198 case DLM_MSG_REQUEST:
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index aa32e5f02493..cd8e2df3c295 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -487,7 +487,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
487 goto out_lkbfree; 487 goto out_lkbfree;
488 for (i = 0; i < size; i++) { 488 for (i = 0; i < size; i++) {
489 INIT_LIST_HEAD(&ls->ls_dirtbl[i].list); 489 INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
490 rwlock_init(&ls->ls_dirtbl[i].lock); 490 spin_lock_init(&ls->ls_dirtbl[i].lock);
491 } 491 }
492 492
493 INIT_LIST_HEAD(&ls->ls_waiters); 493 INIT_LIST_HEAD(&ls->ls_waiters);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 103a5ebd1371..609108a83267 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -2,7 +2,7 @@
2******************************************************************************* 2*******************************************************************************
3** 3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. 5** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
6** 6**
7** This copyrighted material is made available to anyone wishing to use, 7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions 8** modify, copy, or redistribute it subject to the terms and conditions
@@ -21,7 +21,7 @@
21 * 21 *
22 * Cluster nodes are referred to by their nodeids. nodeids are 22 * Cluster nodes are referred to by their nodeids. nodeids are
23 * simply 32 bit numbers to the locking module - if they need to 23 * simply 32 bit numbers to the locking module - if they need to
24 * be expanded for the cluster infrastructure then that is it's 24 * be expanded for the cluster infrastructure then that is its
25 * responsibility. It is this layer's 25 * responsibility. It is this layer's
26 * responsibility to resolve these into IP address or 26 * responsibility to resolve these into IP address or
27 * whatever it needs for inter-node communication. 27 * whatever it needs for inter-node communication.
@@ -36,9 +36,9 @@
36 * of high load. Also, this way, the sending thread can collect together 36 * of high load. Also, this way, the sending thread can collect together
37 * messages bound for one node and send them in one block. 37 * messages bound for one node and send them in one block.
38 * 38 *
39 * lowcomms will choose to use wither TCP or SCTP as its transport layer 39 * lowcomms will choose to use either TCP or SCTP as its transport layer
40 * depending on the configuration variable 'protocol'. This should be set 40 * depending on the configuration variable 'protocol'. This should be set
41 * to 0 (default) for TCP or 1 for SCTP. It shouldbe configured using a 41 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
42 * cluster-wide mechanism as it must be the same on all nodes of the cluster 42 * cluster-wide mechanism as it must be the same on all nodes of the cluster
43 * for the DLM to function. 43 * for the DLM to function.
44 * 44 *
@@ -48,11 +48,11 @@
48#include <net/sock.h> 48#include <net/sock.h>
49#include <net/tcp.h> 49#include <net/tcp.h>
50#include <linux/pagemap.h> 50#include <linux/pagemap.h>
51#include <linux/idr.h>
52#include <linux/file.h> 51#include <linux/file.h>
53#include <linux/mutex.h> 52#include <linux/mutex.h>
54#include <linux/sctp.h> 53#include <linux/sctp.h>
55#include <net/sctp/user.h> 54#include <net/sctp/user.h>
55#include <net/ipv6.h>
56 56
57#include "dlm_internal.h" 57#include "dlm_internal.h"
58#include "lowcomms.h" 58#include "lowcomms.h"
@@ -60,6 +60,7 @@
60#include "config.h" 60#include "config.h"
61 61
62#define NEEDED_RMEM (4*1024*1024) 62#define NEEDED_RMEM (4*1024*1024)
63#define CONN_HASH_SIZE 32
63 64
64struct cbuf { 65struct cbuf {
65 unsigned int base; 66 unsigned int base;
@@ -114,6 +115,7 @@ struct connection {
114 int retries; 115 int retries;
115#define MAX_CONNECT_RETRIES 3 116#define MAX_CONNECT_RETRIES 3
116 int sctp_assoc; 117 int sctp_assoc;
118 struct hlist_node list;
117 struct connection *othercon; 119 struct connection *othercon;
118 struct work_struct rwork; /* Receive workqueue */ 120 struct work_struct rwork; /* Receive workqueue */
119 struct work_struct swork; /* Send workqueue */ 121 struct work_struct swork; /* Send workqueue */
@@ -138,14 +140,37 @@ static int dlm_local_count;
138static struct workqueue_struct *recv_workqueue; 140static struct workqueue_struct *recv_workqueue;
139static struct workqueue_struct *send_workqueue; 141static struct workqueue_struct *send_workqueue;
140 142
141static DEFINE_IDR(connections_idr); 143static struct hlist_head connection_hash[CONN_HASH_SIZE];
142static DEFINE_MUTEX(connections_lock); 144static DEFINE_MUTEX(connections_lock);
143static int max_nodeid;
144static struct kmem_cache *con_cache; 145static struct kmem_cache *con_cache;
145 146
146static void process_recv_sockets(struct work_struct *work); 147static void process_recv_sockets(struct work_struct *work);
147static void process_send_sockets(struct work_struct *work); 148static void process_send_sockets(struct work_struct *work);
148 149
150
151/* This is deliberately very simple because most clusters have simple
152 sequential nodeids, so we should be able to go straight to a connection
153 struct in the array */
154static inline int nodeid_hash(int nodeid)
155{
156 return nodeid & (CONN_HASH_SIZE-1);
157}
158
159static struct connection *__find_con(int nodeid)
160{
161 int r;
162 struct hlist_node *h;
163 struct connection *con;
164
165 r = nodeid_hash(nodeid);
166
167 hlist_for_each_entry(con, h, &connection_hash[r], list) {
168 if (con->nodeid == nodeid)
169 return con;
170 }
171 return NULL;
172}
173
149/* 174/*
150 * If 'allocation' is zero then we don't attempt to create a new 175 * If 'allocation' is zero then we don't attempt to create a new
151 * connection structure for this node. 176 * connection structure for this node.
@@ -154,31 +179,17 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
154{ 179{
155 struct connection *con = NULL; 180 struct connection *con = NULL;
156 int r; 181 int r;
157 int n;
158 182
159 con = idr_find(&connections_idr, nodeid); 183 con = __find_con(nodeid);
160 if (con || !alloc) 184 if (con || !alloc)
161 return con; 185 return con;
162 186
163 r = idr_pre_get(&connections_idr, alloc);
164 if (!r)
165 return NULL;
166
167 con = kmem_cache_zalloc(con_cache, alloc); 187 con = kmem_cache_zalloc(con_cache, alloc);
168 if (!con) 188 if (!con)
169 return NULL; 189 return NULL;
170 190
171 r = idr_get_new_above(&connections_idr, con, nodeid, &n); 191 r = nodeid_hash(nodeid);
172 if (r) { 192 hlist_add_head(&con->list, &connection_hash[r]);
173 kmem_cache_free(con_cache, con);
174 return NULL;
175 }
176
177 if (n != nodeid) {
178 idr_remove(&connections_idr, n);
179 kmem_cache_free(con_cache, con);
180 return NULL;
181 }
182 193
183 con->nodeid = nodeid; 194 con->nodeid = nodeid;
184 mutex_init(&con->sock_mutex); 195 mutex_init(&con->sock_mutex);
@@ -189,19 +200,30 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
189 200
190 /* Setup action pointers for child sockets */ 201 /* Setup action pointers for child sockets */
191 if (con->nodeid) { 202 if (con->nodeid) {
192 struct connection *zerocon = idr_find(&connections_idr, 0); 203 struct connection *zerocon = __find_con(0);
193 204
194 con->connect_action = zerocon->connect_action; 205 con->connect_action = zerocon->connect_action;
195 if (!con->rx_action) 206 if (!con->rx_action)
196 con->rx_action = zerocon->rx_action; 207 con->rx_action = zerocon->rx_action;
197 } 208 }
198 209
199 if (nodeid > max_nodeid)
200 max_nodeid = nodeid;
201
202 return con; 210 return con;
203} 211}
204 212
213/* Loop round all connections */
214static void foreach_conn(void (*conn_func)(struct connection *c))
215{
216 int i;
217 struct hlist_node *h, *n;
218 struct connection *con;
219
220 for (i = 0; i < CONN_HASH_SIZE; i++) {
221 hlist_for_each_entry_safe(con, h, n, &connection_hash[i], list){
222 conn_func(con);
223 }
224 }
225}
226
205static struct connection *nodeid2con(int nodeid, gfp_t allocation) 227static struct connection *nodeid2con(int nodeid, gfp_t allocation)
206{ 228{
207 struct connection *con; 229 struct connection *con;
@@ -217,14 +239,17 @@ static struct connection *nodeid2con(int nodeid, gfp_t allocation)
217static struct connection *assoc2con(int assoc_id) 239static struct connection *assoc2con(int assoc_id)
218{ 240{
219 int i; 241 int i;
242 struct hlist_node *h;
220 struct connection *con; 243 struct connection *con;
221 244
222 mutex_lock(&connections_lock); 245 mutex_lock(&connections_lock);
223 for (i=0; i<=max_nodeid; i++) { 246
224 con = __nodeid2con(i, 0); 247 for (i = 0 ; i < CONN_HASH_SIZE; i++) {
225 if (con && con->sctp_assoc == assoc_id) { 248 hlist_for_each_entry(con, h, &connection_hash[i], list) {
226 mutex_unlock(&connections_lock); 249 if (con && con->sctp_assoc == assoc_id) {
227 return con; 250 mutex_unlock(&connections_lock);
251 return con;
252 }
228 } 253 }
229 } 254 }
230 mutex_unlock(&connections_lock); 255 mutex_unlock(&connections_lock);
@@ -250,8 +275,7 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
250 } else { 275 } else {
251 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr; 276 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr;
252 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr; 277 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
253 memcpy(&ret6->sin6_addr, &in6->sin6_addr, 278 ipv6_addr_copy(&ret6->sin6_addr, &in6->sin6_addr);
254 sizeof(in6->sin6_addr));
255 } 279 }
256 280
257 return 0; 281 return 0;
@@ -376,25 +400,23 @@ static void sctp_send_shutdown(sctp_assoc_t associd)
376 log_print("send EOF to node failed: %d", ret); 400 log_print("send EOF to node failed: %d", ret);
377} 401}
378 402
403static void sctp_init_failed_foreach(struct connection *con)
404{
405 con->sctp_assoc = 0;
406 if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
407 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
408 queue_work(send_workqueue, &con->swork);
409 }
410}
411
379/* INIT failed but we don't know which node... 412/* INIT failed but we don't know which node...
380 restart INIT on all pending nodes */ 413 restart INIT on all pending nodes */
381static void sctp_init_failed(void) 414static void sctp_init_failed(void)
382{ 415{
383 int i;
384 struct connection *con;
385
386 mutex_lock(&connections_lock); 416 mutex_lock(&connections_lock);
387 for (i=1; i<=max_nodeid; i++) { 417
388 con = __nodeid2con(i, 0); 418 foreach_conn(sctp_init_failed_foreach);
389 if (!con) 419
390 continue;
391 con->sctp_assoc = 0;
392 if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
393 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
394 queue_work(send_workqueue, &con->swork);
395 }
396 }
397 }
398 mutex_unlock(&connections_lock); 420 mutex_unlock(&connections_lock);
399} 421}
400 422
@@ -1313,13 +1335,10 @@ out_connect:
1313 1335
1314static void clean_one_writequeue(struct connection *con) 1336static void clean_one_writequeue(struct connection *con)
1315{ 1337{
1316 struct list_head *list; 1338 struct writequeue_entry *e, *safe;
1317 struct list_head *temp;
1318 1339
1319 spin_lock(&con->writequeue_lock); 1340 spin_lock(&con->writequeue_lock);
1320 list_for_each_safe(list, temp, &con->writequeue) { 1341 list_for_each_entry_safe(e, safe, &con->writequeue, list) {
1321 struct writequeue_entry *e =
1322 list_entry(list, struct writequeue_entry, list);
1323 list_del(&e->list); 1342 list_del(&e->list);
1324 free_entry(e); 1343 free_entry(e);
1325 } 1344 }
@@ -1369,14 +1388,7 @@ static void process_send_sockets(struct work_struct *work)
1369/* Discard all entries on the write queues */ 1388/* Discard all entries on the write queues */
1370static void clean_writequeues(void) 1389static void clean_writequeues(void)
1371{ 1390{
1372 int nodeid; 1391 foreach_conn(clean_one_writequeue);
1373
1374 for (nodeid = 1; nodeid <= max_nodeid; nodeid++) {
1375 struct connection *con = __nodeid2con(nodeid, 0);
1376
1377 if (con)
1378 clean_one_writequeue(con);
1379 }
1380} 1392}
1381 1393
1382static void work_stop(void) 1394static void work_stop(void)
@@ -1406,23 +1418,29 @@ static int work_start(void)
1406 return 0; 1418 return 0;
1407} 1419}
1408 1420
1409void dlm_lowcomms_stop(void) 1421static void stop_conn(struct connection *con)
1410{ 1422{
1411 int i; 1423 con->flags |= 0x0F;
1412 struct connection *con; 1424 if (con->sock)
1425 con->sock->sk->sk_user_data = NULL;
1426}
1413 1427
1428static void free_conn(struct connection *con)
1429{
1430 close_connection(con, true);
1431 if (con->othercon)
1432 kmem_cache_free(con_cache, con->othercon);
1433 hlist_del(&con->list);
1434 kmem_cache_free(con_cache, con);
1435}
1436
1437void dlm_lowcomms_stop(void)
1438{
1414 /* Set all the flags to prevent any 1439 /* Set all the flags to prevent any
1415 socket activity. 1440 socket activity.
1416 */ 1441 */
1417 mutex_lock(&connections_lock); 1442 mutex_lock(&connections_lock);
1418 for (i = 0; i <= max_nodeid; i++) { 1443 foreach_conn(stop_conn);
1419 con = __nodeid2con(i, 0);
1420 if (con) {
1421 con->flags |= 0x0F;
1422 if (con->sock)
1423 con->sock->sk->sk_user_data = NULL;
1424 }
1425 }
1426 mutex_unlock(&connections_lock); 1444 mutex_unlock(&connections_lock);
1427 1445
1428 work_stop(); 1446 work_stop();
@@ -1430,25 +1448,20 @@ void dlm_lowcomms_stop(void)
1430 mutex_lock(&connections_lock); 1448 mutex_lock(&connections_lock);
1431 clean_writequeues(); 1449 clean_writequeues();
1432 1450
1433 for (i = 0; i <= max_nodeid; i++) { 1451 foreach_conn(free_conn);
1434 con = __nodeid2con(i, 0); 1452
1435 if (con) {
1436 close_connection(con, true);
1437 if (con->othercon)
1438 kmem_cache_free(con_cache, con->othercon);
1439 kmem_cache_free(con_cache, con);
1440 }
1441 }
1442 max_nodeid = 0;
1443 mutex_unlock(&connections_lock); 1453 mutex_unlock(&connections_lock);
1444 kmem_cache_destroy(con_cache); 1454 kmem_cache_destroy(con_cache);
1445 idr_init(&connections_idr);
1446} 1455}
1447 1456
1448int dlm_lowcomms_start(void) 1457int dlm_lowcomms_start(void)
1449{ 1458{
1450 int error = -EINVAL; 1459 int error = -EINVAL;
1451 struct connection *con; 1460 struct connection *con;
1461 int i;
1462
1463 for (i = 0; i < CONN_HASH_SIZE; i++)
1464 INIT_HLIST_HEAD(&connection_hash[i]);
1452 1465
1453 init_local(); 1466 init_local();
1454 if (!dlm_local_count) { 1467 if (!dlm_local_count) {
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 065149e84f42..ebce994ab0b7 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. 2 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
3 * 3 *
4 * This copyrighted material is made available to anyone wishing to use, 4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions 5 * modify, copy, or redistribute it subject to the terms and conditions
@@ -84,7 +84,7 @@ struct dlm_lock_result32 {
84 84
85static void compat_input(struct dlm_write_request *kb, 85static void compat_input(struct dlm_write_request *kb,
86 struct dlm_write_request32 *kb32, 86 struct dlm_write_request32 *kb32,
87 size_t count) 87 int namelen)
88{ 88{
89 kb->version[0] = kb32->version[0]; 89 kb->version[0] = kb32->version[0];
90 kb->version[1] = kb32->version[1]; 90 kb->version[1] = kb32->version[1];
@@ -96,8 +96,7 @@ static void compat_input(struct dlm_write_request *kb,
96 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) { 96 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
97 kb->i.lspace.flags = kb32->i.lspace.flags; 97 kb->i.lspace.flags = kb32->i.lspace.flags;
98 kb->i.lspace.minor = kb32->i.lspace.minor; 98 kb->i.lspace.minor = kb32->i.lspace.minor;
99 memcpy(kb->i.lspace.name, kb32->i.lspace.name, count - 99 memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
100 offsetof(struct dlm_write_request32, i.lspace.name));
101 } else if (kb->cmd == DLM_USER_PURGE) { 100 } else if (kb->cmd == DLM_USER_PURGE) {
102 kb->i.purge.nodeid = kb32->i.purge.nodeid; 101 kb->i.purge.nodeid = kb32->i.purge.nodeid;
103 kb->i.purge.pid = kb32->i.purge.pid; 102 kb->i.purge.pid = kb32->i.purge.pid;
@@ -115,8 +114,7 @@ static void compat_input(struct dlm_write_request *kb,
115 kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr; 114 kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
116 kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb; 115 kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
117 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN); 116 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
118 memcpy(kb->i.lock.name, kb32->i.lock.name, count - 117 memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
119 offsetof(struct dlm_write_request32, i.lock.name));
120 } 118 }
121} 119}
122 120
@@ -539,9 +537,16 @@ static ssize_t device_write(struct file *file, const char __user *buf,
539#ifdef CONFIG_COMPAT 537#ifdef CONFIG_COMPAT
540 if (!kbuf->is64bit) { 538 if (!kbuf->is64bit) {
541 struct dlm_write_request32 *k32buf; 539 struct dlm_write_request32 *k32buf;
540 int namelen = 0;
541
542 if (count > sizeof(struct dlm_write_request32))
543 namelen = count - sizeof(struct dlm_write_request32);
544
542 k32buf = (struct dlm_write_request32 *)kbuf; 545 k32buf = (struct dlm_write_request32 *)kbuf;
543 kbuf = kmalloc(count + 1 + (sizeof(struct dlm_write_request) - 546
544 sizeof(struct dlm_write_request32)), GFP_KERNEL); 547 /* add 1 after namelen so that the name string is terminated */
548 kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
549 GFP_KERNEL);
545 if (!kbuf) { 550 if (!kbuf) {
546 kfree(k32buf); 551 kfree(k32buf);
547 return -ENOMEM; 552 return -ENOMEM;
@@ -549,7 +554,8 @@ static ssize_t device_write(struct file *file, const char __user *buf,
549 554
550 if (proc) 555 if (proc)
551 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags); 556 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
552 compat_input(kbuf, k32buf, count + 1); 557
558 compat_input(kbuf, k32buf, namelen);
553 kfree(k32buf); 559 kfree(k32buf);
554 } 560 }
555#endif 561#endif
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 3e5637fc3779..44d725f612cf 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -18,7 +18,7 @@ static void drop_pagecache_sb(struct super_block *sb)
18 18
19 spin_lock(&inode_lock); 19 spin_lock(&inode_lock);
20 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 20 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
21 if (inode->i_state & (I_FREEING|I_WILL_FREE)) 21 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
22 continue; 22 continue;
23 if (inode->i_mapping->nrpages == 0) 23 if (inode->i_mapping->nrpages == 0)
24 continue; 24 continue;
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 5e596583946c..2dda5ade75bc 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -89,7 +89,7 @@ static void ecryptfs_d_release(struct dentry *dentry)
89 return; 89 return;
90} 90}
91 91
92struct dentry_operations ecryptfs_dops = { 92const struct dentry_operations ecryptfs_dops = {
93 .d_revalidate = ecryptfs_d_revalidate, 93 .d_revalidate = ecryptfs_d_revalidate,
94 .d_release = ecryptfs_d_release, 94 .d_release = ecryptfs_d_release,
95}; 95};
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index ac749d4d644f..064c5820e4e5 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -580,7 +580,7 @@ extern const struct inode_operations ecryptfs_main_iops;
580extern const struct inode_operations ecryptfs_dir_iops; 580extern const struct inode_operations ecryptfs_dir_iops;
581extern const struct inode_operations ecryptfs_symlink_iops; 581extern const struct inode_operations ecryptfs_symlink_iops;
582extern const struct super_operations ecryptfs_sops; 582extern const struct super_operations ecryptfs_sops;
583extern struct dentry_operations ecryptfs_dops; 583extern const struct dentry_operations ecryptfs_dops;
584extern struct address_space_operations ecryptfs_aops; 584extern struct address_space_operations ecryptfs_aops;
585extern int ecryptfs_verbosity; 585extern int ecryptfs_verbosity;
586extern unsigned int ecryptfs_message_buf_len; 586extern unsigned int ecryptfs_message_buf_len;
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 4a29d6376081..7f8d2e5a7ea6 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -570,7 +570,7 @@ do_more:
570error_return: 570error_return:
571 brelse(bitmap_bh); 571 brelse(bitmap_bh);
572 release_blocks(sb, freed); 572 release_blocks(sb, freed);
573 DQUOT_FREE_BLOCK(inode, freed); 573 vfs_dq_free_block(inode, freed);
574} 574}
575 575
576/** 576/**
@@ -1247,7 +1247,7 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
1247 /* 1247 /*
1248 * Check quota for allocation of this block. 1248 * Check quota for allocation of this block.
1249 */ 1249 */
1250 if (DQUOT_ALLOC_BLOCK(inode, num)) { 1250 if (vfs_dq_alloc_block(inode, num)) {
1251 *errp = -EDQUOT; 1251 *errp = -EDQUOT;
1252 return 0; 1252 return 0;
1253 } 1253 }
@@ -1409,7 +1409,7 @@ allocated:
1409 1409
1410 *errp = 0; 1410 *errp = 0;
1411 brelse(bitmap_bh); 1411 brelse(bitmap_bh);
1412 DQUOT_FREE_BLOCK(inode, *count-num); 1412 vfs_dq_free_block(inode, *count-num);
1413 *count = num; 1413 *count = num;
1414 return ret_block; 1414 return ret_block;
1415 1415
@@ -1420,7 +1420,7 @@ out:
1420 * Undo the block allocation 1420 * Undo the block allocation
1421 */ 1421 */
1422 if (!performed_allocation) 1422 if (!performed_allocation)
1423 DQUOT_FREE_BLOCK(inode, *count); 1423 vfs_dq_free_block(inode, *count);
1424 brelse(bitmap_bh); 1424 brelse(bitmap_bh);
1425 return 0; 1425 return 0;
1426} 1426}
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 66321a877e74..15387c9c17d8 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -121,8 +121,8 @@ void ext2_free_inode (struct inode * inode)
121 if (!is_bad_inode(inode)) { 121 if (!is_bad_inode(inode)) {
122 /* Quota is already initialized in iput() */ 122 /* Quota is already initialized in iput() */
123 ext2_xattr_delete_inode(inode); 123 ext2_xattr_delete_inode(inode);
124 DQUOT_FREE_INODE(inode); 124 vfs_dq_free_inode(inode);
125 DQUOT_DROP(inode); 125 vfs_dq_drop(inode);
126 } 126 }
127 127
128 es = EXT2_SB(sb)->s_es; 128 es = EXT2_SB(sb)->s_es;
@@ -586,7 +586,7 @@ got:
586 goto fail_drop; 586 goto fail_drop;
587 } 587 }
588 588
589 if (DQUOT_ALLOC_INODE(inode)) { 589 if (vfs_dq_alloc_inode(inode)) {
590 err = -EDQUOT; 590 err = -EDQUOT;
591 goto fail_drop; 591 goto fail_drop;
592 } 592 }
@@ -605,10 +605,10 @@ got:
605 return inode; 605 return inode;
606 606
607fail_free_drop: 607fail_free_drop:
608 DQUOT_FREE_INODE(inode); 608 vfs_dq_free_inode(inode);
609 609
610fail_drop: 610fail_drop:
611 DQUOT_DROP(inode); 611 vfs_dq_drop(inode);
612 inode->i_flags |= S_NOQUOTA; 612 inode->i_flags |= S_NOQUOTA;
613 inode->i_nlink = 0; 613 inode->i_nlink = 0;
614 unlock_new_inode(inode); 614 unlock_new_inode(inode);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 23fff2f87783..b43b95563663 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1444,7 +1444,7 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
1444 return error; 1444 return error;
1445 if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || 1445 if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
1446 (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { 1446 (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
1447 error = DQUOT_TRANSFER(inode, iattr) ? -EDQUOT : 0; 1447 error = vfs_dq_transfer(inode, iattr) ? -EDQUOT : 0;
1448 if (error) 1448 if (error)
1449 return error; 1449 return error;
1450 } 1450 }
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 7c6e3606f0ec..f983225266dc 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1331,6 +1331,7 @@ static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
1331 sb->s_blocksize - offset : toread; 1331 sb->s_blocksize - offset : toread;
1332 1332
1333 tmp_bh.b_state = 0; 1333 tmp_bh.b_state = 0;
1334 tmp_bh.b_size = sb->s_blocksize;
1334 err = ext2_get_block(inode, blk, &tmp_bh, 0); 1335 err = ext2_get_block(inode, blk, &tmp_bh, 0);
1335 if (err < 0) 1336 if (err < 0)
1336 return err; 1337 return err;
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 987a5261cc2e..7913531ec6d5 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -642,7 +642,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
642 ea_bdebug(new_bh, "reusing block"); 642 ea_bdebug(new_bh, "reusing block");
643 643
644 error = -EDQUOT; 644 error = -EDQUOT;
645 if (DQUOT_ALLOC_BLOCK(inode, 1)) { 645 if (vfs_dq_alloc_block(inode, 1)) {
646 unlock_buffer(new_bh); 646 unlock_buffer(new_bh);
647 goto cleanup; 647 goto cleanup;
648 } 648 }
@@ -699,7 +699,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
699 * as if nothing happened and cleanup the unused block */ 699 * as if nothing happened and cleanup the unused block */
700 if (error && error != -ENOSPC) { 700 if (error && error != -ENOSPC) {
701 if (new_bh && new_bh != old_bh) 701 if (new_bh && new_bh != old_bh)
702 DQUOT_FREE_BLOCK(inode, 1); 702 vfs_dq_free_block(inode, 1);
703 goto cleanup; 703 goto cleanup;
704 } 704 }
705 } else 705 } else
@@ -731,7 +731,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
731 le32_add_cpu(&HDR(old_bh)->h_refcount, -1); 731 le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
732 if (ce) 732 if (ce)
733 mb_cache_entry_release(ce); 733 mb_cache_entry_release(ce);
734 DQUOT_FREE_BLOCK(inode, 1); 734 vfs_dq_free_block(inode, 1);
735 mark_buffer_dirty(old_bh); 735 mark_buffer_dirty(old_bh);
736 ea_bdebug(old_bh, "refcount now=%d", 736 ea_bdebug(old_bh, "refcount now=%d",
737 le32_to_cpu(HDR(old_bh)->h_refcount)); 737 le32_to_cpu(HDR(old_bh)->h_refcount));
@@ -794,7 +794,7 @@ ext2_xattr_delete_inode(struct inode *inode)
794 mark_buffer_dirty(bh); 794 mark_buffer_dirty(bh);
795 if (IS_SYNC(inode)) 795 if (IS_SYNC(inode))
796 sync_dirty_buffer(bh); 796 sync_dirty_buffer(bh);
797 DQUOT_FREE_BLOCK(inode, 1); 797 vfs_dq_free_block(inode, 1);
798 } 798 }
799 EXT2_I(inode)->i_file_acl = 0; 799 EXT2_I(inode)->i_file_acl = 0;
800 800
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 0dbf1c048475..225202db8974 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -676,7 +676,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
676 } 676 }
677 ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks); 677 ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
678 if (dquot_freed_blocks) 678 if (dquot_freed_blocks)
679 DQUOT_FREE_BLOCK(inode, dquot_freed_blocks); 679 vfs_dq_free_block(inode, dquot_freed_blocks);
680 return; 680 return;
681} 681}
682 682
@@ -1502,7 +1502,7 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
1502 /* 1502 /*
1503 * Check quota for allocation of this block. 1503 * Check quota for allocation of this block.
1504 */ 1504 */
1505 if (DQUOT_ALLOC_BLOCK(inode, num)) { 1505 if (vfs_dq_alloc_block(inode, num)) {
1506 *errp = -EDQUOT; 1506 *errp = -EDQUOT;
1507 return 0; 1507 return 0;
1508 } 1508 }
@@ -1714,7 +1714,7 @@ allocated:
1714 1714
1715 *errp = 0; 1715 *errp = 0;
1716 brelse(bitmap_bh); 1716 brelse(bitmap_bh);
1717 DQUOT_FREE_BLOCK(inode, *count-num); 1717 vfs_dq_free_block(inode, *count-num);
1718 *count = num; 1718 *count = num;
1719 return ret_block; 1719 return ret_block;
1720 1720
@@ -1729,7 +1729,7 @@ out:
1729 * Undo the block allocation 1729 * Undo the block allocation
1730 */ 1730 */
1731 if (!performed_allocation) 1731 if (!performed_allocation)
1732 DQUOT_FREE_BLOCK(inode, *count); 1732 vfs_dq_free_block(inode, *count);
1733 brelse(bitmap_bh); 1733 brelse(bitmap_bh);
1734 return 0; 1734 return 0;
1735} 1735}
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 8de6c720e510..dd13d60d524b 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -123,10 +123,10 @@ void ext3_free_inode (handle_t *handle, struct inode * inode)
123 * Note: we must free any quota before locking the superblock, 123 * Note: we must free any quota before locking the superblock,
124 * as writing the quota to disk may need the lock as well. 124 * as writing the quota to disk may need the lock as well.
125 */ 125 */
126 DQUOT_INIT(inode); 126 vfs_dq_init(inode);
127 ext3_xattr_delete_inode(handle, inode); 127 ext3_xattr_delete_inode(handle, inode);
128 DQUOT_FREE_INODE(inode); 128 vfs_dq_free_inode(inode);
129 DQUOT_DROP(inode); 129 vfs_dq_drop(inode);
130 130
131 is_directory = S_ISDIR(inode->i_mode); 131 is_directory = S_ISDIR(inode->i_mode);
132 132
@@ -589,7 +589,7 @@ got:
589 sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0; 589 sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
590 590
591 ret = inode; 591 ret = inode;
592 if(DQUOT_ALLOC_INODE(inode)) { 592 if (vfs_dq_alloc_inode(inode)) {
593 err = -EDQUOT; 593 err = -EDQUOT;
594 goto fail_drop; 594 goto fail_drop;
595 } 595 }
@@ -620,10 +620,10 @@ really_out:
620 return ret; 620 return ret;
621 621
622fail_free_drop: 622fail_free_drop:
623 DQUOT_FREE_INODE(inode); 623 vfs_dq_free_inode(inode);
624 624
625fail_drop: 625fail_drop:
626 DQUOT_DROP(inode); 626 vfs_dq_drop(inode);
627 inode->i_flags |= S_NOQUOTA; 627 inode->i_flags |= S_NOQUOTA;
628 inode->i_nlink = 0; 628 inode->i_nlink = 0;
629 unlock_new_inode(inode); 629 unlock_new_inode(inode);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 05e5c2e5c0d7..4a09ff169870 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3063,7 +3063,7 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
3063 error = PTR_ERR(handle); 3063 error = PTR_ERR(handle);
3064 goto err_out; 3064 goto err_out;
3065 } 3065 }
3066 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; 3066 error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
3067 if (error) { 3067 if (error) {
3068 ext3_journal_stop(handle); 3068 ext3_journal_stop(handle);
3069 return error; 3069 return error;
@@ -3154,7 +3154,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode)
3154 ret = 2 * (bpp + indirects) + 2; 3154 ret = 2 * (bpp + indirects) + 2;
3155 3155
3156#ifdef CONFIG_QUOTA 3156#ifdef CONFIG_QUOTA
3157 /* We know that structure was already allocated during DQUOT_INIT so 3157 /* We know that structure was already allocated during vfs_dq_init so
3158 * we will be updating only the data blocks + inodes */ 3158 * we will be updating only the data blocks + inodes */
3159 ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb); 3159 ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
3160#endif 3160#endif
@@ -3245,7 +3245,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
3245 * i_size has been changed by generic_commit_write() and we thus need 3245 * i_size has been changed by generic_commit_write() and we thus need
3246 * to include the updated inode in the current transaction. 3246 * to include the updated inode in the current transaction.
3247 * 3247 *
3248 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks 3248 * Also, vfs_dq_alloc_space() will always dirty the inode when blocks
3249 * are allocated to the file. 3249 * are allocated to the file.
3250 * 3250 *
3251 * If the inode is marked synchronous, we don't honour that here - doing 3251 * If the inode is marked synchronous, we don't honour that here - doing
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 4db4ffa1edad..e2fc63cbba8b 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -2049,7 +2049,7 @@ static int ext3_rmdir (struct inode * dir, struct dentry *dentry)
2049 2049
2050 /* Initialize quotas before so that eventual writes go in 2050 /* Initialize quotas before so that eventual writes go in
2051 * separate transaction */ 2051 * separate transaction */
2052 DQUOT_INIT(dentry->d_inode); 2052 vfs_dq_init(dentry->d_inode);
2053 handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb)); 2053 handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
2054 if (IS_ERR(handle)) 2054 if (IS_ERR(handle))
2055 return PTR_ERR(handle); 2055 return PTR_ERR(handle);
@@ -2108,7 +2108,7 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry)
2108 2108
2109 /* Initialize quotas before so that eventual writes go 2109 /* Initialize quotas before so that eventual writes go
2110 * in separate transaction */ 2110 * in separate transaction */
2111 DQUOT_INIT(dentry->d_inode); 2111 vfs_dq_init(dentry->d_inode);
2112 handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb)); 2112 handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
2113 if (IS_ERR(handle)) 2113 if (IS_ERR(handle))
2114 return PTR_ERR(handle); 2114 return PTR_ERR(handle);
@@ -2272,7 +2272,7 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
2272 /* Initialize quotas before so that eventual writes go 2272 /* Initialize quotas before so that eventual writes go
2273 * in separate transaction */ 2273 * in separate transaction */
2274 if (new_dentry->d_inode) 2274 if (new_dentry->d_inode)
2275 DQUOT_INIT(new_dentry->d_inode); 2275 vfs_dq_init(new_dentry->d_inode);
2276 handle = ext3_journal_start(old_dir, 2 * 2276 handle = ext3_journal_start(old_dir, 2 *
2277 EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) + 2277 EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
2278 EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2); 2278 EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 4a970411a458..9e5b8e387e1e 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -707,8 +707,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
707#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group") 707#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
708#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) 708#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
709 709
710static int ext3_dquot_initialize(struct inode *inode, int type);
711static int ext3_dquot_drop(struct inode *inode);
712static int ext3_write_dquot(struct dquot *dquot); 710static int ext3_write_dquot(struct dquot *dquot);
713static int ext3_acquire_dquot(struct dquot *dquot); 711static int ext3_acquire_dquot(struct dquot *dquot);
714static int ext3_release_dquot(struct dquot *dquot); 712static int ext3_release_dquot(struct dquot *dquot);
@@ -723,8 +721,8 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
723 const char *data, size_t len, loff_t off); 721 const char *data, size_t len, loff_t off);
724 722
725static struct dquot_operations ext3_quota_operations = { 723static struct dquot_operations ext3_quota_operations = {
726 .initialize = ext3_dquot_initialize, 724 .initialize = dquot_initialize,
727 .drop = ext3_dquot_drop, 725 .drop = dquot_drop,
728 .alloc_space = dquot_alloc_space, 726 .alloc_space = dquot_alloc_space,
729 .alloc_inode = dquot_alloc_inode, 727 .alloc_inode = dquot_alloc_inode,
730 .free_space = dquot_free_space, 728 .free_space = dquot_free_space,
@@ -1438,7 +1436,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
1438 } 1436 }
1439 1437
1440 list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan); 1438 list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
1441 DQUOT_INIT(inode); 1439 vfs_dq_init(inode);
1442 if (inode->i_nlink) { 1440 if (inode->i_nlink) {
1443 printk(KERN_DEBUG 1441 printk(KERN_DEBUG
1444 "%s: truncating inode %lu to %Ld bytes\n", 1442 "%s: truncating inode %lu to %Ld bytes\n",
@@ -2702,7 +2700,7 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
2702 * Process 1 Process 2 2700 * Process 1 Process 2
2703 * ext3_create() quota_sync() 2701 * ext3_create() quota_sync()
2704 * journal_start() write_dquot() 2702 * journal_start() write_dquot()
2705 * DQUOT_INIT() down(dqio_mutex) 2703 * vfs_dq_init() down(dqio_mutex)
2706 * down(dqio_mutex) journal_start() 2704 * down(dqio_mutex) journal_start()
2707 * 2705 *
2708 */ 2706 */
@@ -2714,44 +2712,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot)
2714 return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; 2712 return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
2715} 2713}
2716 2714
2717static int ext3_dquot_initialize(struct inode *inode, int type)
2718{
2719 handle_t *handle;
2720 int ret, err;
2721
2722 /* We may create quota structure so we need to reserve enough blocks */
2723 handle = ext3_journal_start(inode, 2*EXT3_QUOTA_INIT_BLOCKS(inode->i_sb));
2724 if (IS_ERR(handle))
2725 return PTR_ERR(handle);
2726 ret = dquot_initialize(inode, type);
2727 err = ext3_journal_stop(handle);
2728 if (!ret)
2729 ret = err;
2730 return ret;
2731}
2732
2733static int ext3_dquot_drop(struct inode *inode)
2734{
2735 handle_t *handle;
2736 int ret, err;
2737
2738 /* We may delete quota structure so we need to reserve enough blocks */
2739 handle = ext3_journal_start(inode, 2*EXT3_QUOTA_DEL_BLOCKS(inode->i_sb));
2740 if (IS_ERR(handle)) {
2741 /*
2742 * We call dquot_drop() anyway to at least release references
2743 * to quota structures so that umount does not hang.
2744 */
2745 dquot_drop(inode);
2746 return PTR_ERR(handle);
2747 }
2748 ret = dquot_drop(inode);
2749 err = ext3_journal_stop(handle);
2750 if (!ret)
2751 ret = err;
2752 return ret;
2753}
2754
2755static int ext3_write_dquot(struct dquot *dquot) 2715static int ext3_write_dquot(struct dquot *dquot)
2756{ 2716{
2757 int ret, err; 2717 int ret, err;
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index 175414ac2210..83b7be849bd5 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -498,7 +498,7 @@ ext3_xattr_release_block(handle_t *handle, struct inode *inode,
498 error = ext3_journal_dirty_metadata(handle, bh); 498 error = ext3_journal_dirty_metadata(handle, bh);
499 if (IS_SYNC(inode)) 499 if (IS_SYNC(inode))
500 handle->h_sync = 1; 500 handle->h_sync = 1;
501 DQUOT_FREE_BLOCK(inode, 1); 501 vfs_dq_free_block(inode, 1);
502 ea_bdebug(bh, "refcount now=%d; releasing", 502 ea_bdebug(bh, "refcount now=%d; releasing",
503 le32_to_cpu(BHDR(bh)->h_refcount)); 503 le32_to_cpu(BHDR(bh)->h_refcount));
504 if (ce) 504 if (ce)
@@ -774,7 +774,7 @@ inserted:
774 /* The old block is released after updating 774 /* The old block is released after updating
775 the inode. */ 775 the inode. */
776 error = -EDQUOT; 776 error = -EDQUOT;
777 if (DQUOT_ALLOC_BLOCK(inode, 1)) 777 if (vfs_dq_alloc_block(inode, 1))
778 goto cleanup; 778 goto cleanup;
779 error = ext3_journal_get_write_access(handle, 779 error = ext3_journal_get_write_access(handle,
780 new_bh); 780 new_bh);
@@ -848,7 +848,7 @@ cleanup:
848 return error; 848 return error;
849 849
850cleanup_dquot: 850cleanup_dquot:
851 DQUOT_FREE_BLOCK(inode, 1); 851 vfs_dq_free_block(inode, 1);
852 goto cleanup; 852 goto cleanup;
853 853
854bad_block: 854bad_block:
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index de9459b4cb94..38f40d55899c 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -536,7 +536,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
536 ext4_mb_free_blocks(handle, inode, block, count, 536 ext4_mb_free_blocks(handle, inode, block, count,
537 metadata, &dquot_freed_blocks); 537 metadata, &dquot_freed_blocks);
538 if (dquot_freed_blocks) 538 if (dquot_freed_blocks)
539 DQUOT_FREE_BLOCK(inode, dquot_freed_blocks); 539 vfs_dq_free_block(inode, dquot_freed_blocks);
540 return; 540 return;
541} 541}
542 542
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index b0c87dce66a3..6083bb38057b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -20,6 +20,7 @@
20#include <linux/blkdev.h> 20#include <linux/blkdev.h>
21#include <linux/magic.h> 21#include <linux/magic.h>
22#include <linux/jbd2.h> 22#include <linux/jbd2.h>
23#include <linux/quota.h>
23#include "ext4_i.h" 24#include "ext4_i.h"
24 25
25/* 26/*
@@ -1098,6 +1099,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
1098extern int ext4_block_truncate_page(handle_t *handle, 1099extern int ext4_block_truncate_page(handle_t *handle,
1099 struct address_space *mapping, loff_t from); 1100 struct address_space *mapping, loff_t from);
1100extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page); 1101extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page);
1102extern qsize_t ext4_get_reserved_space(struct inode *inode);
1101 1103
1102/* ioctl.c */ 1104/* ioctl.c */
1103extern long ext4_ioctl(struct file *, unsigned int, unsigned long); 1105extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 2d2b3585ee91..fb51b40e3e8f 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -220,10 +220,10 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
220 * Note: we must free any quota before locking the superblock, 220 * Note: we must free any quota before locking the superblock,
221 * as writing the quota to disk may need the lock as well. 221 * as writing the quota to disk may need the lock as well.
222 */ 222 */
223 DQUOT_INIT(inode); 223 vfs_dq_init(inode);
224 ext4_xattr_delete_inode(handle, inode); 224 ext4_xattr_delete_inode(handle, inode);
225 DQUOT_FREE_INODE(inode); 225 vfs_dq_free_inode(inode);
226 DQUOT_DROP(inode); 226 vfs_dq_drop(inode);
227 227
228 is_directory = S_ISDIR(inode->i_mode); 228 is_directory = S_ISDIR(inode->i_mode);
229 229
@@ -915,7 +915,7 @@ got:
915 ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize; 915 ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
916 916
917 ret = inode; 917 ret = inode;
918 if (DQUOT_ALLOC_INODE(inode)) { 918 if (vfs_dq_alloc_inode(inode)) {
919 err = -EDQUOT; 919 err = -EDQUOT;
920 goto fail_drop; 920 goto fail_drop;
921 } 921 }
@@ -956,10 +956,10 @@ really_out:
956 return ret; 956 return ret;
957 957
958fail_free_drop: 958fail_free_drop:
959 DQUOT_FREE_INODE(inode); 959 vfs_dq_free_inode(inode);
960 960
961fail_drop: 961fail_drop:
962 DQUOT_DROP(inode); 962 vfs_dq_drop(inode);
963 inode->i_flags |= S_NOQUOTA; 963 inode->i_flags |= S_NOQUOTA;
964 inode->i_nlink = 0; 964 inode->i_nlink = 0;
965 unlock_new_inode(inode); 965 unlock_new_inode(inode);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c7fed5b18745..71d3ecd5db79 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -975,6 +975,17 @@ out:
975 return err; 975 return err;
976} 976}
977 977
978qsize_t ext4_get_reserved_space(struct inode *inode)
979{
980 unsigned long long total;
981
982 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
983 total = EXT4_I(inode)->i_reserved_data_blocks +
984 EXT4_I(inode)->i_reserved_meta_blocks;
985 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
986
987 return total;
988}
978/* 989/*
979 * Calculate the number of metadata blocks need to reserve 990 * Calculate the number of metadata blocks need to reserve
980 * to allocate @blocks for non extent file based file 991 * to allocate @blocks for non extent file based file
@@ -1036,8 +1047,14 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
1036 /* update per-inode reservations */ 1047 /* update per-inode reservations */
1037 BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks); 1048 BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
1038 EXT4_I(inode)->i_reserved_data_blocks -= used; 1049 EXT4_I(inode)->i_reserved_data_blocks -= used;
1039
1040 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1050 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1051
1052 /*
1053 * free those over-booking quota for metadata blocks
1054 */
1055
1056 if (mdb_free)
1057 vfs_dq_release_reservation_block(inode, mdb_free);
1041} 1058}
1042 1059
1043/* 1060/*
@@ -1553,8 +1570,8 @@ static int ext4_journalled_write_end(struct file *file,
1553static int ext4_da_reserve_space(struct inode *inode, int nrblocks) 1570static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
1554{ 1571{
1555 int retries = 0; 1572 int retries = 0;
1556 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1573 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1557 unsigned long md_needed, mdblocks, total = 0; 1574 unsigned long md_needed, mdblocks, total = 0;
1558 1575
1559 /* 1576 /*
1560 * recalculate the amount of metadata blocks to reserve 1577 * recalculate the amount of metadata blocks to reserve
@@ -1570,12 +1587,23 @@ repeat:
1570 md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks; 1587 md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
1571 total = md_needed + nrblocks; 1588 total = md_needed + nrblocks;
1572 1589
1590 /*
1591 * Make quota reservation here to prevent quota overflow
1592 * later. Real quota accounting is done at pages writeout
1593 * time.
1594 */
1595 if (vfs_dq_reserve_block(inode, total)) {
1596 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1597 return -EDQUOT;
1598 }
1599
1573 if (ext4_claim_free_blocks(sbi, total)) { 1600 if (ext4_claim_free_blocks(sbi, total)) {
1574 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1601 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1575 if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1602 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1576 yield(); 1603 yield();
1577 goto repeat; 1604 goto repeat;
1578 } 1605 }
1606 vfs_dq_release_reservation_block(inode, total);
1579 return -ENOSPC; 1607 return -ENOSPC;
1580 } 1608 }
1581 EXT4_I(inode)->i_reserved_data_blocks += nrblocks; 1609 EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
@@ -1629,6 +1657,8 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
1629 BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); 1657 BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1630 EXT4_I(inode)->i_reserved_meta_blocks = mdb; 1658 EXT4_I(inode)->i_reserved_meta_blocks = mdb;
1631 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1659 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1660
1661 vfs_dq_release_reservation_block(inode, release);
1632} 1662}
1633 1663
1634static void ext4_da_page_release_reservation(struct page *page, 1664static void ext4_da_page_release_reservation(struct page *page,
@@ -4612,7 +4642,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4612 error = PTR_ERR(handle); 4642 error = PTR_ERR(handle);
4613 goto err_out; 4643 goto err_out;
4614 } 4644 }
4615 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; 4645 error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
4616 if (error) { 4646 if (error) {
4617 ext4_journal_stop(handle); 4647 ext4_journal_stop(handle);
4618 return error; 4648 return error;
@@ -4991,7 +5021,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4991 * i_size has been changed by generic_commit_write() and we thus need 5021 * i_size has been changed by generic_commit_write() and we thus need
4992 * to include the updated inode in the current transaction. 5022 * to include the updated inode in the current transaction.
4993 * 5023 *
4994 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks 5024 * Also, vfs_dq_alloc_block() will always dirty the inode when blocks
4995 * are allocated to the file. 5025 * are allocated to the file.
4996 * 5026 *
4997 * If the inode is marked synchronous, we don't honour that here - doing 5027 * If the inode is marked synchronous, we don't honour that here - doing
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 9f61e62f435f..b038188bd039 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3086,9 +3086,12 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3086 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) 3086 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3087 /* release all the reserved blocks if non delalloc */ 3087 /* release all the reserved blocks if non delalloc */
3088 percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks); 3088 percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
3089 else 3089 else {
3090 percpu_counter_sub(&sbi->s_dirtyblocks_counter, 3090 percpu_counter_sub(&sbi->s_dirtyblocks_counter,
3091 ac->ac_b_ex.fe_len); 3091 ac->ac_b_ex.fe_len);
3092 /* convert reserved quota blocks to real quota blocks */
3093 vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
3094 }
3092 3095
3093 if (sbi->s_log_groups_per_flex) { 3096 if (sbi->s_log_groups_per_flex) {
3094 ext4_group_t flex_group = ext4_flex_group(sbi, 3097 ext4_group_t flex_group = ext4_flex_group(sbi,
@@ -4544,7 +4547,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4544 struct ext4_sb_info *sbi; 4547 struct ext4_sb_info *sbi;
4545 struct super_block *sb; 4548 struct super_block *sb;
4546 ext4_fsblk_t block = 0; 4549 ext4_fsblk_t block = 0;
4547 unsigned int inquota; 4550 unsigned int inquota = 0;
4548 unsigned int reserv_blks = 0; 4551 unsigned int reserv_blks = 0;
4549 4552
4550 sb = ar->inode->i_sb; 4553 sb = ar->inode->i_sb;
@@ -4562,9 +4565,17 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4562 (unsigned long long) ar->pleft, 4565 (unsigned long long) ar->pleft,
4563 (unsigned long long) ar->pright); 4566 (unsigned long long) ar->pright);
4564 4567
4565 if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) { 4568 /*
4566 /* 4569 * For delayed allocation, we could skip the ENOSPC and
4567 * With delalloc we already reserved the blocks 4570 * EDQUOT check, as blocks and quotas have been already
4571 * reserved when data being copied into pagecache.
4572 */
4573 if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
4574 ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4575 else {
4576 /* Without delayed allocation we need to verify
4577 * there is enough free blocks to do block allocation
4578 * and verify allocation doesn't exceed the quota limits.
4568 */ 4579 */
4569 while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) { 4580 while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
4570 /* let others to free the space */ 4581 /* let others to free the space */
@@ -4576,19 +4587,16 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4576 return 0; 4587 return 0;
4577 } 4588 }
4578 reserv_blks = ar->len; 4589 reserv_blks = ar->len;
4590 while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
4591 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4592 ar->len--;
4593 }
4594 inquota = ar->len;
4595 if (ar->len == 0) {
4596 *errp = -EDQUOT;
4597 goto out3;
4598 }
4579 } 4599 }
4580 while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
4581 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4582 ar->len--;
4583 }
4584 if (ar->len == 0) {
4585 *errp = -EDQUOT;
4586 goto out3;
4587 }
4588 inquota = ar->len;
4589
4590 if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
4591 ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4592 4600
4593 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); 4601 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4594 if (!ac) { 4602 if (!ac) {
@@ -4654,8 +4662,8 @@ repeat:
4654out2: 4662out2:
4655 kmem_cache_free(ext4_ac_cachep, ac); 4663 kmem_cache_free(ext4_ac_cachep, ac);
4656out1: 4664out1:
4657 if (ar->len < inquota) 4665 if (inquota && ar->len < inquota)
4658 DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len); 4666 vfs_dq_free_block(ar->inode, inquota - ar->len);
4659out3: 4667out3:
4660 if (!ar->len) { 4668 if (!ar->len) {
4661 if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) 4669 if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index ba702bd7910d..83410244d3ee 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2092,7 +2092,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
2092 2092
2093 /* Initialize quotas before so that eventual writes go in 2093 /* Initialize quotas before so that eventual writes go in
2094 * separate transaction */ 2094 * separate transaction */
2095 DQUOT_INIT(dentry->d_inode); 2095 vfs_dq_init(dentry->d_inode);
2096 handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); 2096 handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
2097 if (IS_ERR(handle)) 2097 if (IS_ERR(handle))
2098 return PTR_ERR(handle); 2098 return PTR_ERR(handle);
@@ -2151,7 +2151,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
2151 2151
2152 /* Initialize quotas before so that eventual writes go 2152 /* Initialize quotas before so that eventual writes go
2153 * in separate transaction */ 2153 * in separate transaction */
2154 DQUOT_INIT(dentry->d_inode); 2154 vfs_dq_init(dentry->d_inode);
2155 handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); 2155 handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
2156 if (IS_ERR(handle)) 2156 if (IS_ERR(handle))
2157 return PTR_ERR(handle); 2157 return PTR_ERR(handle);
@@ -2318,7 +2318,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2318 /* Initialize quotas before so that eventual writes go 2318 /* Initialize quotas before so that eventual writes go
2319 * in separate transaction */ 2319 * in separate transaction */
2320 if (new_dentry->d_inode) 2320 if (new_dentry->d_inode)
2321 DQUOT_INIT(new_dentry->d_inode); 2321 vfs_dq_init(new_dentry->d_inode);
2322 handle = ext4_journal_start(old_dir, 2 * 2322 handle = ext4_journal_start(old_dir, 2 *
2323 EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) + 2323 EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
2324 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); 2324 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 39d1993cfa13..f7371a6a923d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -926,8 +926,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_
926#define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group") 926#define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group")
927#define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) 927#define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
928 928
929static int ext4_dquot_initialize(struct inode *inode, int type);
930static int ext4_dquot_drop(struct inode *inode);
931static int ext4_write_dquot(struct dquot *dquot); 929static int ext4_write_dquot(struct dquot *dquot);
932static int ext4_acquire_dquot(struct dquot *dquot); 930static int ext4_acquire_dquot(struct dquot *dquot);
933static int ext4_release_dquot(struct dquot *dquot); 931static int ext4_release_dquot(struct dquot *dquot);
@@ -942,9 +940,13 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
942 const char *data, size_t len, loff_t off); 940 const char *data, size_t len, loff_t off);
943 941
944static struct dquot_operations ext4_quota_operations = { 942static struct dquot_operations ext4_quota_operations = {
945 .initialize = ext4_dquot_initialize, 943 .initialize = dquot_initialize,
946 .drop = ext4_dquot_drop, 944 .drop = dquot_drop,
947 .alloc_space = dquot_alloc_space, 945 .alloc_space = dquot_alloc_space,
946 .reserve_space = dquot_reserve_space,
947 .claim_space = dquot_claim_space,
948 .release_rsv = dquot_release_reserved_space,
949 .get_reserved_space = ext4_get_reserved_space,
948 .alloc_inode = dquot_alloc_inode, 950 .alloc_inode = dquot_alloc_inode,
949 .free_space = dquot_free_space, 951 .free_space = dquot_free_space,
950 .free_inode = dquot_free_inode, 952 .free_inode = dquot_free_inode,
@@ -1802,7 +1804,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
1802 } 1804 }
1803 1805
1804 list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); 1806 list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
1805 DQUOT_INIT(inode); 1807 vfs_dq_init(inode);
1806 if (inode->i_nlink) { 1808 if (inode->i_nlink) {
1807 printk(KERN_DEBUG 1809 printk(KERN_DEBUG
1808 "%s: truncating inode %lu to %lld bytes\n", 1810 "%s: truncating inode %lu to %lld bytes\n",
@@ -3367,8 +3369,8 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
3367 * is locked for write. Otherwise the are possible deadlocks: 3369 * is locked for write. Otherwise the are possible deadlocks:
3368 * Process 1 Process 2 3370 * Process 1 Process 2
3369 * ext4_create() quota_sync() 3371 * ext4_create() quota_sync()
3370 * jbd2_journal_start() write_dquot() 3372 * jbd2_journal_start() write_dquot()
3371 * DQUOT_INIT() down(dqio_mutex) 3373 * vfs_dq_init() down(dqio_mutex)
3372 * down(dqio_mutex) jbd2_journal_start() 3374 * down(dqio_mutex) jbd2_journal_start()
3373 * 3375 *
3374 */ 3376 */
@@ -3380,44 +3382,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot)
3380 return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; 3382 return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
3381} 3383}
3382 3384
3383static int ext4_dquot_initialize(struct inode *inode, int type)
3384{
3385 handle_t *handle;
3386 int ret, err;
3387
3388 /* We may create quota structure so we need to reserve enough blocks */
3389 handle = ext4_journal_start(inode, 2*EXT4_QUOTA_INIT_BLOCKS(inode->i_sb));
3390 if (IS_ERR(handle))
3391 return PTR_ERR(handle);
3392 ret = dquot_initialize(inode, type);
3393 err = ext4_journal_stop(handle);
3394 if (!ret)
3395 ret = err;
3396 return ret;
3397}
3398
3399static int ext4_dquot_drop(struct inode *inode)
3400{
3401 handle_t *handle;
3402 int ret, err;
3403
3404 /* We may delete quota structure so we need to reserve enough blocks */
3405 handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb));
3406 if (IS_ERR(handle)) {
3407 /*
3408 * We call dquot_drop() anyway to at least release references
3409 * to quota structures so that umount does not hang.
3410 */
3411 dquot_drop(inode);
3412 return PTR_ERR(handle);
3413 }
3414 ret = dquot_drop(inode);
3415 err = ext4_journal_stop(handle);
3416 if (!ret)
3417 ret = err;
3418 return ret;
3419}
3420
3421static int ext4_write_dquot(struct dquot *dquot) 3385static int ext4_write_dquot(struct dquot *dquot)
3422{ 3386{
3423 int ret, err; 3387 int ret, err;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 157ce6589c54..62b31c246994 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -490,7 +490,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
490 error = ext4_handle_dirty_metadata(handle, inode, bh); 490 error = ext4_handle_dirty_metadata(handle, inode, bh);
491 if (IS_SYNC(inode)) 491 if (IS_SYNC(inode))
492 ext4_handle_sync(handle); 492 ext4_handle_sync(handle);
493 DQUOT_FREE_BLOCK(inode, 1); 493 vfs_dq_free_block(inode, 1);
494 ea_bdebug(bh, "refcount now=%d; releasing", 494 ea_bdebug(bh, "refcount now=%d; releasing",
495 le32_to_cpu(BHDR(bh)->h_refcount)); 495 le32_to_cpu(BHDR(bh)->h_refcount));
496 if (ce) 496 if (ce)
@@ -784,7 +784,7 @@ inserted:
784 /* The old block is released after updating 784 /* The old block is released after updating
785 the inode. */ 785 the inode. */
786 error = -EDQUOT; 786 error = -EDQUOT;
787 if (DQUOT_ALLOC_BLOCK(inode, 1)) 787 if (vfs_dq_alloc_block(inode, 1))
788 goto cleanup; 788 goto cleanup;
789 error = ext4_journal_get_write_access(handle, 789 error = ext4_journal_get_write_access(handle,
790 new_bh); 790 new_bh);
@@ -860,7 +860,7 @@ cleanup:
860 return error; 860 return error;
861 861
862cleanup_dquot: 862cleanup_dquot:
863 DQUOT_FREE_BLOCK(inode, 1); 863 vfs_dq_free_block(inode, 1);
864 goto cleanup; 864 goto cleanup;
865 865
866bad_block: 866bad_block:
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 7ba03a4acbe0..da3f361a37dd 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -188,7 +188,7 @@ old_compare:
188 goto out; 188 goto out;
189} 189}
190 190
191static struct dentry_operations msdos_dentry_operations = { 191static const struct dentry_operations msdos_dentry_operations = {
192 .d_hash = msdos_hash, 192 .d_hash = msdos_hash,
193 .d_compare = msdos_cmp, 193 .d_compare = msdos_cmp,
194}; 194};
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 8ae32e37673c..a0e00e3a46e9 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -166,13 +166,13 @@ static int vfat_cmp(struct dentry *dentry, struct qstr *a, struct qstr *b)
166 return 1; 166 return 1;
167} 167}
168 168
169static struct dentry_operations vfat_ci_dentry_ops = { 169static const struct dentry_operations vfat_ci_dentry_ops = {
170 .d_revalidate = vfat_revalidate_ci, 170 .d_revalidate = vfat_revalidate_ci,
171 .d_hash = vfat_hashi, 171 .d_hash = vfat_hashi,
172 .d_compare = vfat_cmpi, 172 .d_compare = vfat_cmpi,
173}; 173};
174 174
175static struct dentry_operations vfat_dentry_ops = { 175static const struct dentry_operations vfat_dentry_ops = {
176 .d_revalidate = vfat_revalidate, 176 .d_revalidate = vfat_revalidate,
177 .d_hash = vfat_hash, 177 .d_hash = vfat_hash,
178 .d_compare = vfat_cmp, 178 .d_compare = vfat_cmp,
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index fdff346e96fd..06da05261e04 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -224,7 +224,7 @@ static int invalid_nodeid(u64 nodeid)
224 return !nodeid || nodeid == FUSE_ROOT_ID; 224 return !nodeid || nodeid == FUSE_ROOT_ID;
225} 225}
226 226
227struct dentry_operations fuse_dentry_operations = { 227const struct dentry_operations fuse_dentry_operations = {
228 .d_revalidate = fuse_dentry_revalidate, 228 .d_revalidate = fuse_dentry_revalidate,
229}; 229};
230 230
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 5e64b815a5a1..6fc5aedaa0d5 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -493,7 +493,7 @@ static inline u64 get_node_id(struct inode *inode)
493/** Device operations */ 493/** Device operations */
494extern const struct file_operations fuse_dev_operations; 494extern const struct file_operations fuse_dev_operations;
495 495
496extern struct dentry_operations fuse_dentry_operations; 496extern const struct dentry_operations fuse_dentry_operations;
497 497
498/** 498/**
499 * Get a filled in inode 499 * Get a filled in inode
diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c
index 5eb57b044382..022c66cd5606 100644
--- a/fs/gfs2/ops_dentry.c
+++ b/fs/gfs2/ops_dentry.c
@@ -107,7 +107,7 @@ static int gfs2_dhash(struct dentry *dentry, struct qstr *str)
107 return 0; 107 return 0;
108} 108}
109 109
110struct dentry_operations gfs2_dops = { 110const struct dentry_operations gfs2_dops = {
111 .d_revalidate = gfs2_drevalidate, 111 .d_revalidate = gfs2_drevalidate,
112 .d_hash = gfs2_dhash, 112 .d_hash = gfs2_dhash,
113}; 113};
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 91abdbedcc86..b56413e3e40d 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -49,7 +49,7 @@ extern struct file_system_type gfs2_fs_type;
49extern struct file_system_type gfs2meta_fs_type; 49extern struct file_system_type gfs2meta_fs_type;
50extern const struct export_operations gfs2_export_ops; 50extern const struct export_operations gfs2_export_ops;
51extern const struct super_operations gfs2_super_ops; 51extern const struct super_operations gfs2_super_ops;
52extern struct dentry_operations gfs2_dops; 52extern const struct dentry_operations gfs2_dops;
53 53
54#endif /* __SUPER_DOT_H__ */ 54#endif /* __SUPER_DOT_H__ */
55 55
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 9955232fdf8c..052387e11671 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -213,7 +213,7 @@ extern void hfs_mdb_put(struct super_block *);
213extern int hfs_part_find(struct super_block *, sector_t *, sector_t *); 213extern int hfs_part_find(struct super_block *, sector_t *, sector_t *);
214 214
215/* string.c */ 215/* string.c */
216extern struct dentry_operations hfs_dentry_operations; 216extern const struct dentry_operations hfs_dentry_operations;
217 217
218extern int hfs_hash_dentry(struct dentry *, struct qstr *); 218extern int hfs_hash_dentry(struct dentry *, struct qstr *);
219extern int hfs_strcmp(const unsigned char *, unsigned int, 219extern int hfs_strcmp(const unsigned char *, unsigned int,
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index 5bf89ec01cd4..7478f5c219aa 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -31,7 +31,7 @@ static int hfs_revalidate_dentry(struct dentry *dentry, struct nameidata *nd)
31 return 1; 31 return 1;
32} 32}
33 33
34struct dentry_operations hfs_dentry_operations = 34const struct dentry_operations hfs_dentry_operations =
35{ 35{
36 .d_revalidate = hfs_revalidate_dentry, 36 .d_revalidate = hfs_revalidate_dentry,
37 .d_hash = hfs_hash_dentry, 37 .d_hash = hfs_hash_dentry,
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index f027a905225f..5c10d803d9df 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -327,7 +327,7 @@ void hfsplus_file_truncate(struct inode *);
327/* inode.c */ 327/* inode.c */
328extern const struct address_space_operations hfsplus_aops; 328extern const struct address_space_operations hfsplus_aops;
329extern const struct address_space_operations hfsplus_btree_aops; 329extern const struct address_space_operations hfsplus_btree_aops;
330extern struct dentry_operations hfsplus_dentry_operations; 330extern const struct dentry_operations hfsplus_dentry_operations;
331 331
332void hfsplus_inode_read_fork(struct inode *, struct hfsplus_fork_raw *); 332void hfsplus_inode_read_fork(struct inode *, struct hfsplus_fork_raw *);
333void hfsplus_inode_write_fork(struct inode *, struct hfsplus_fork_raw *); 333void hfsplus_inode_write_fork(struct inode *, struct hfsplus_fork_raw *);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index f105ee9e1cc4..1bcf597c0562 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -137,7 +137,7 @@ const struct address_space_operations hfsplus_aops = {
137 .writepages = hfsplus_writepages, 137 .writepages = hfsplus_writepages,
138}; 138};
139 139
140struct dentry_operations hfsplus_dentry_operations = { 140const struct dentry_operations hfsplus_dentry_operations = {
141 .d_hash = hfsplus_hash_dentry, 141 .d_hash = hfsplus_hash_dentry,
142 .d_compare = hfsplus_compare_dentry, 142 .d_compare = hfsplus_compare_dentry,
143}; 143};
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 5c538e0ec14b..fe02ad4740e7 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -31,12 +31,12 @@ static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
31 31
32#define FILE_HOSTFS_I(file) HOSTFS_I((file)->f_path.dentry->d_inode) 32#define FILE_HOSTFS_I(file) HOSTFS_I((file)->f_path.dentry->d_inode)
33 33
34int hostfs_d_delete(struct dentry *dentry) 34static int hostfs_d_delete(struct dentry *dentry)
35{ 35{
36 return 1; 36 return 1;
37} 37}
38 38
39struct dentry_operations hostfs_dentry_ops = { 39static const struct dentry_operations hostfs_dentry_ops = {
40 .d_delete = hostfs_d_delete, 40 .d_delete = hostfs_d_delete,
41}; 41};
42 42
diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c
index 08319126b2af..940d6d150bee 100644
--- a/fs/hpfs/dentry.c
+++ b/fs/hpfs/dentry.c
@@ -49,7 +49,7 @@ static int hpfs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qst
49 return 0; 49 return 0;
50} 50}
51 51
52static struct dentry_operations hpfs_dentry_operations = { 52static const struct dentry_operations hpfs_dentry_operations = {
53 .d_hash = hpfs_hash_dentry, 53 .d_hash = hpfs_hash_dentry,
54 .d_compare = hpfs_compare_dentry, 54 .d_compare = hpfs_compare_dentry,
55}; 55};
diff --git a/fs/inode.c b/fs/inode.c
index 643ac43e5a5c..d06d6d268de9 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -294,7 +294,7 @@ void clear_inode(struct inode *inode)
294 BUG_ON(!(inode->i_state & I_FREEING)); 294 BUG_ON(!(inode->i_state & I_FREEING));
295 BUG_ON(inode->i_state & I_CLEAR); 295 BUG_ON(inode->i_state & I_CLEAR);
296 inode_sync_wait(inode); 296 inode_sync_wait(inode);
297 DQUOT_DROP(inode); 297 vfs_dq_drop(inode);
298 if (inode->i_sb->s_op->clear_inode) 298 if (inode->i_sb->s_op->clear_inode)
299 inode->i_sb->s_op->clear_inode(inode); 299 inode->i_sb->s_op->clear_inode(inode);
300 if (S_ISBLK(inode->i_mode) && inode->i_bdev) 300 if (S_ISBLK(inode->i_mode) && inode->i_bdev)
@@ -366,6 +366,8 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
366 if (tmp == head) 366 if (tmp == head)
367 break; 367 break;
368 inode = list_entry(tmp, struct inode, i_sb_list); 368 inode = list_entry(tmp, struct inode, i_sb_list);
369 if (inode->i_state & I_NEW)
370 continue;
369 invalidate_inode_buffers(inode); 371 invalidate_inode_buffers(inode);
370 if (!atomic_read(&inode->i_count)) { 372 if (!atomic_read(&inode->i_count)) {
371 list_move(&inode->i_list, dispose); 373 list_move(&inode->i_list, dispose);
@@ -1168,7 +1170,7 @@ void generic_delete_inode(struct inode *inode)
1168 if (op->delete_inode) { 1170 if (op->delete_inode) {
1169 void (*delete)(struct inode *) = op->delete_inode; 1171 void (*delete)(struct inode *) = op->delete_inode;
1170 if (!is_bad_inode(inode)) 1172 if (!is_bad_inode(inode))
1171 DQUOT_INIT(inode); 1173 vfs_dq_init(inode);
1172 /* Filesystems implementing their own 1174 /* Filesystems implementing their own
1173 * s_op->delete_inode are required to call 1175 * s_op->delete_inode are required to call
1174 * truncate_inode_pages and clear_inode() 1176 * truncate_inode_pages and clear_inode()
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 6147ec3643a0..13d2eddd0692 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -114,7 +114,7 @@ static const struct super_operations isofs_sops = {
114}; 114};
115 115
116 116
117static struct dentry_operations isofs_dentry_ops[] = { 117static const struct dentry_operations isofs_dentry_ops[] = {
118 { 118 {
119 .d_hash = isofs_hash, 119 .d_hash = isofs_hash,
120 .d_compare = isofs_dentry_cmp, 120 .d_compare = isofs_dentry_cmp,
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index d3e5c33665de..a166c1669e82 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -233,7 +233,7 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
233 233
234 if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || 234 if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
235 (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { 235 (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
236 if (DQUOT_TRANSFER(inode, iattr)) 236 if (vfs_dq_transfer(inode, iattr))
237 return -EDQUOT; 237 return -EDQUOT;
238 } 238 }
239 239
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index b00ee9f05a06..b2ae190a77ba 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -158,9 +158,9 @@ void jfs_delete_inode(struct inode *inode)
158 /* 158 /*
159 * Free the inode from the quota allocation. 159 * Free the inode from the quota allocation.
160 */ 160 */
161 DQUOT_INIT(inode); 161 vfs_dq_init(inode);
162 DQUOT_FREE_INODE(inode); 162 vfs_dq_free_inode(inode);
163 DQUOT_DROP(inode); 163 vfs_dq_drop(inode);
164 } 164 }
165 165
166 clear_inode(inode); 166 clear_inode(inode);
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 4dcc05819998..925871e9887b 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -381,10 +381,10 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
381 * It's time to move the inline table to an external 381 * It's time to move the inline table to an external
382 * page and begin to build the xtree 382 * page and begin to build the xtree
383 */ 383 */
384 if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage)) 384 if (vfs_dq_alloc_block(ip, sbi->nbperpage))
385 goto clean_up; 385 goto clean_up;
386 if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) { 386 if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
387 DQUOT_FREE_BLOCK(ip, sbi->nbperpage); 387 vfs_dq_free_block(ip, sbi->nbperpage);
388 goto clean_up; 388 goto clean_up;
389 } 389 }
390 390
@@ -408,7 +408,7 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
408 memcpy(&jfs_ip->i_dirtable, temp_table, 408 memcpy(&jfs_ip->i_dirtable, temp_table,
409 sizeof (temp_table)); 409 sizeof (temp_table));
410 dbFree(ip, xaddr, sbi->nbperpage); 410 dbFree(ip, xaddr, sbi->nbperpage);
411 DQUOT_FREE_BLOCK(ip, sbi->nbperpage); 411 vfs_dq_free_block(ip, sbi->nbperpage);
412 goto clean_up; 412 goto clean_up;
413 } 413 }
414 ip->i_size = PSIZE; 414 ip->i_size = PSIZE;
@@ -1027,7 +1027,7 @@ static int dtSplitUp(tid_t tid,
1027 n = xlen; 1027 n = xlen;
1028 1028
1029 /* Allocate blocks to quota. */ 1029 /* Allocate blocks to quota. */
1030 if (DQUOT_ALLOC_BLOCK(ip, n)) { 1030 if (vfs_dq_alloc_block(ip, n)) {
1031 rc = -EDQUOT; 1031 rc = -EDQUOT;
1032 goto extendOut; 1032 goto extendOut;
1033 } 1033 }
@@ -1308,7 +1308,7 @@ static int dtSplitUp(tid_t tid,
1308 1308
1309 /* Rollback quota allocation */ 1309 /* Rollback quota allocation */
1310 if (rc && quota_allocation) 1310 if (rc && quota_allocation)
1311 DQUOT_FREE_BLOCK(ip, quota_allocation); 1311 vfs_dq_free_block(ip, quota_allocation);
1312 1312
1313 dtSplitUp_Exit: 1313 dtSplitUp_Exit:
1314 1314
@@ -1369,7 +1369,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
1369 return -EIO; 1369 return -EIO;
1370 1370
1371 /* Allocate blocks to quota. */ 1371 /* Allocate blocks to quota. */
1372 if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) { 1372 if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
1373 release_metapage(rmp); 1373 release_metapage(rmp);
1374 return -EDQUOT; 1374 return -EDQUOT;
1375 } 1375 }
@@ -1916,7 +1916,7 @@ static int dtSplitRoot(tid_t tid,
1916 rp = rmp->data; 1916 rp = rmp->data;
1917 1917
1918 /* Allocate blocks to quota. */ 1918 /* Allocate blocks to quota. */
1919 if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) { 1919 if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
1920 release_metapage(rmp); 1920 release_metapage(rmp);
1921 return -EDQUOT; 1921 return -EDQUOT;
1922 } 1922 }
@@ -2287,7 +2287,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
2287 xlen = lengthPXD(&fp->header.self); 2287 xlen = lengthPXD(&fp->header.self);
2288 2288
2289 /* Free quota allocation. */ 2289 /* Free quota allocation. */
2290 DQUOT_FREE_BLOCK(ip, xlen); 2290 vfs_dq_free_block(ip, xlen);
2291 2291
2292 /* free/invalidate its buffer page */ 2292 /* free/invalidate its buffer page */
2293 discard_metapage(fmp); 2293 discard_metapage(fmp);
@@ -2363,7 +2363,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
2363 xlen = lengthPXD(&p->header.self); 2363 xlen = lengthPXD(&p->header.self);
2364 2364
2365 /* Free quota allocation */ 2365 /* Free quota allocation */
2366 DQUOT_FREE_BLOCK(ip, xlen); 2366 vfs_dq_free_block(ip, xlen);
2367 2367
2368 /* free/invalidate its buffer page */ 2368 /* free/invalidate its buffer page */
2369 discard_metapage(mp); 2369 discard_metapage(mp);
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index 7ae1e3281de9..169802ea07f9 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -141,7 +141,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
141 } 141 }
142 142
143 /* Allocate blocks to quota. */ 143 /* Allocate blocks to quota. */
144 if (DQUOT_ALLOC_BLOCK(ip, nxlen)) { 144 if (vfs_dq_alloc_block(ip, nxlen)) {
145 dbFree(ip, nxaddr, (s64) nxlen); 145 dbFree(ip, nxaddr, (s64) nxlen);
146 mutex_unlock(&JFS_IP(ip)->commit_mutex); 146 mutex_unlock(&JFS_IP(ip)->commit_mutex);
147 return -EDQUOT; 147 return -EDQUOT;
@@ -164,7 +164,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
164 */ 164 */
165 if (rc) { 165 if (rc) {
166 dbFree(ip, nxaddr, nxlen); 166 dbFree(ip, nxaddr, nxlen);
167 DQUOT_FREE_BLOCK(ip, nxlen); 167 vfs_dq_free_block(ip, nxlen);
168 mutex_unlock(&JFS_IP(ip)->commit_mutex); 168 mutex_unlock(&JFS_IP(ip)->commit_mutex);
169 return (rc); 169 return (rc);
170 } 170 }
@@ -256,7 +256,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
256 goto exit; 256 goto exit;
257 257
258 /* Allocat blocks to quota. */ 258 /* Allocat blocks to quota. */
259 if (DQUOT_ALLOC_BLOCK(ip, nxlen)) { 259 if (vfs_dq_alloc_block(ip, nxlen)) {
260 dbFree(ip, nxaddr, (s64) nxlen); 260 dbFree(ip, nxaddr, (s64) nxlen);
261 mutex_unlock(&JFS_IP(ip)->commit_mutex); 261 mutex_unlock(&JFS_IP(ip)->commit_mutex);
262 return -EDQUOT; 262 return -EDQUOT;
@@ -297,7 +297,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
297 /* extend the extent */ 297 /* extend the extent */
298 if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) { 298 if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) {
299 dbFree(ip, xaddr + xlen, delta); 299 dbFree(ip, xaddr + xlen, delta);
300 DQUOT_FREE_BLOCK(ip, nxlen); 300 vfs_dq_free_block(ip, nxlen);
301 goto exit; 301 goto exit;
302 } 302 }
303 } else { 303 } else {
@@ -308,7 +308,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
308 */ 308 */
309 if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) { 309 if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) {
310 dbFree(ip, nxaddr, nxlen); 310 dbFree(ip, nxaddr, nxlen);
311 DQUOT_FREE_BLOCK(ip, nxlen); 311 vfs_dq_free_block(ip, nxlen);
312 goto exit; 312 goto exit;
313 } 313 }
314 } 314 }
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index d4d142c2edd4..dc0e02159ac9 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -116,7 +116,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
116 /* 116 /*
117 * Allocate inode to quota. 117 * Allocate inode to quota.
118 */ 118 */
119 if (DQUOT_ALLOC_INODE(inode)) { 119 if (vfs_dq_alloc_inode(inode)) {
120 rc = -EDQUOT; 120 rc = -EDQUOT;
121 goto fail_drop; 121 goto fail_drop;
122 } 122 }
@@ -162,7 +162,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
162 return inode; 162 return inode;
163 163
164fail_drop: 164fail_drop:
165 DQUOT_DROP(inode); 165 vfs_dq_drop(inode);
166 inode->i_flags |= S_NOQUOTA; 166 inode->i_flags |= S_NOQUOTA;
167fail_unlock: 167fail_unlock:
168 inode->i_nlink = 0; 168 inode->i_nlink = 0;
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index adb2fafcc544..1eff7db34d63 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -47,5 +47,5 @@ extern const struct file_operations jfs_dir_operations;
47extern const struct inode_operations jfs_file_inode_operations; 47extern const struct inode_operations jfs_file_inode_operations;
48extern const struct file_operations jfs_file_operations; 48extern const struct file_operations jfs_file_operations;
49extern const struct inode_operations jfs_symlink_inode_operations; 49extern const struct inode_operations jfs_symlink_inode_operations;
50extern struct dentry_operations jfs_ci_dentry_operations; 50extern const struct dentry_operations jfs_ci_dentry_operations;
51#endif /* _H_JFS_INODE */ 51#endif /* _H_JFS_INODE */
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index ae3acafb447b..a27e26c90568 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -846,10 +846,10 @@ int xtInsert(tid_t tid, /* transaction id */
846 hint = addressXAD(xad) + lengthXAD(xad) - 1; 846 hint = addressXAD(xad) + lengthXAD(xad) - 1;
847 } else 847 } else
848 hint = 0; 848 hint = 0;
849 if ((rc = DQUOT_ALLOC_BLOCK(ip, xlen))) 849 if ((rc = vfs_dq_alloc_block(ip, xlen)))
850 goto out; 850 goto out;
851 if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) { 851 if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) {
852 DQUOT_FREE_BLOCK(ip, xlen); 852 vfs_dq_free_block(ip, xlen);
853 goto out; 853 goto out;
854 } 854 }
855 } 855 }
@@ -878,7 +878,7 @@ int xtInsert(tid_t tid, /* transaction id */
878 /* undo data extent allocation */ 878 /* undo data extent allocation */
879 if (*xaddrp == 0) { 879 if (*xaddrp == 0) {
880 dbFree(ip, xaddr, (s64) xlen); 880 dbFree(ip, xaddr, (s64) xlen);
881 DQUOT_FREE_BLOCK(ip, xlen); 881 vfs_dq_free_block(ip, xlen);
882 } 882 }
883 return rc; 883 return rc;
884 } 884 }
@@ -1246,7 +1246,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
1246 rbn = addressPXD(pxd); 1246 rbn = addressPXD(pxd);
1247 1247
1248 /* Allocate blocks to quota. */ 1248 /* Allocate blocks to quota. */
1249 if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) { 1249 if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
1250 rc = -EDQUOT; 1250 rc = -EDQUOT;
1251 goto clean_up; 1251 goto clean_up;
1252 } 1252 }
@@ -1456,7 +1456,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
1456 1456
1457 /* Rollback quota allocation. */ 1457 /* Rollback quota allocation. */
1458 if (quota_allocation) 1458 if (quota_allocation)
1459 DQUOT_FREE_BLOCK(ip, quota_allocation); 1459 vfs_dq_free_block(ip, quota_allocation);
1460 1460
1461 return (rc); 1461 return (rc);
1462} 1462}
@@ -1513,7 +1513,7 @@ xtSplitRoot(tid_t tid,
1513 return -EIO; 1513 return -EIO;
1514 1514
1515 /* Allocate blocks to quota. */ 1515 /* Allocate blocks to quota. */
1516 if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) { 1516 if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
1517 release_metapage(rmp); 1517 release_metapage(rmp);
1518 return -EDQUOT; 1518 return -EDQUOT;
1519 } 1519 }
@@ -3941,7 +3941,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
3941 ip->i_size = newsize; 3941 ip->i_size = newsize;
3942 3942
3943 /* update quota allocation to reflect freed blocks */ 3943 /* update quota allocation to reflect freed blocks */
3944 DQUOT_FREE_BLOCK(ip, nfreed); 3944 vfs_dq_free_block(ip, nfreed);
3945 3945
3946 /* 3946 /*
3947 * free tlock of invalidated pages 3947 * free tlock of invalidated pages
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index b4de56b851e4..514ee2edb92a 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -35,7 +35,7 @@
35/* 35/*
36 * forward references 36 * forward references
37 */ 37 */
38struct dentry_operations jfs_ci_dentry_operations; 38const struct dentry_operations jfs_ci_dentry_operations;
39 39
40static s64 commitZeroLink(tid_t, struct inode *); 40static s64 commitZeroLink(tid_t, struct inode *);
41 41
@@ -356,7 +356,7 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
356 jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name); 356 jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name);
357 357
358 /* Init inode for quota operations. */ 358 /* Init inode for quota operations. */
359 DQUOT_INIT(ip); 359 vfs_dq_init(ip);
360 360
361 /* directory must be empty to be removed */ 361 /* directory must be empty to be removed */
362 if (!dtEmpty(ip)) { 362 if (!dtEmpty(ip)) {
@@ -483,7 +483,7 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
483 jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name); 483 jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name);
484 484
485 /* Init inode for quota operations. */ 485 /* Init inode for quota operations. */
486 DQUOT_INIT(ip); 486 vfs_dq_init(ip);
487 487
488 if ((rc = get_UCSname(&dname, dentry))) 488 if ((rc = get_UCSname(&dname, dentry)))
489 goto out; 489 goto out;
@@ -1136,7 +1136,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1136 } else if (new_ip) { 1136 } else if (new_ip) {
1137 IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL); 1137 IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL);
1138 /* Init inode for quota operations. */ 1138 /* Init inode for quota operations. */
1139 DQUOT_INIT(new_ip); 1139 vfs_dq_init(new_ip);
1140 } 1140 }
1141 1141
1142 /* 1142 /*
@@ -1595,7 +1595,7 @@ out:
1595 return result; 1595 return result;
1596} 1596}
1597 1597
1598struct dentry_operations jfs_ci_dentry_operations = 1598const struct dentry_operations jfs_ci_dentry_operations =
1599{ 1599{
1600 .d_hash = jfs_ci_hash, 1600 .d_hash = jfs_ci_hash,
1601 .d_compare = jfs_ci_compare, 1601 .d_compare = jfs_ci_compare,
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 9b7f2cdaae0a..61dfa8173ebc 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -260,14 +260,14 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
260 nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits; 260 nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;
261 261
262 /* Allocate new blocks to quota. */ 262 /* Allocate new blocks to quota. */
263 if (DQUOT_ALLOC_BLOCK(ip, nblocks)) { 263 if (vfs_dq_alloc_block(ip, nblocks)) {
264 return -EDQUOT; 264 return -EDQUOT;
265 } 265 }
266 266
267 rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno); 267 rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
268 if (rc) { 268 if (rc) {
269 /*Rollback quota allocation. */ 269 /*Rollback quota allocation. */
270 DQUOT_FREE_BLOCK(ip, nblocks); 270 vfs_dq_free_block(ip, nblocks);
271 return rc; 271 return rc;
272 } 272 }
273 273
@@ -332,7 +332,7 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
332 332
333 failed: 333 failed:
334 /* Rollback quota allocation. */ 334 /* Rollback quota allocation. */
335 DQUOT_FREE_BLOCK(ip, nblocks); 335 vfs_dq_free_block(ip, nblocks);
336 336
337 dbFree(ip, blkno, nblocks); 337 dbFree(ip, blkno, nblocks);
338 return rc; 338 return rc;
@@ -538,7 +538,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
538 538
539 if (blocks_needed > current_blocks) { 539 if (blocks_needed > current_blocks) {
540 /* Allocate new blocks to quota. */ 540 /* Allocate new blocks to quota. */
541 if (DQUOT_ALLOC_BLOCK(inode, blocks_needed)) 541 if (vfs_dq_alloc_block(inode, blocks_needed))
542 return -EDQUOT; 542 return -EDQUOT;
543 543
544 quota_allocation = blocks_needed; 544 quota_allocation = blocks_needed;
@@ -602,7 +602,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
602 clean_up: 602 clean_up:
603 /* Rollback quota allocation */ 603 /* Rollback quota allocation */
604 if (quota_allocation) 604 if (quota_allocation)
605 DQUOT_FREE_BLOCK(inode, quota_allocation); 605 vfs_dq_free_block(inode, quota_allocation);
606 606
607 return (rc); 607 return (rc);
608} 608}
@@ -677,7 +677,7 @@ static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
677 677
678 /* If old blocks exist, they must be removed from quota allocation. */ 678 /* If old blocks exist, they must be removed from quota allocation. */
679 if (old_blocks) 679 if (old_blocks)
680 DQUOT_FREE_BLOCK(inode, old_blocks); 680 vfs_dq_free_block(inode, old_blocks);
681 681
682 inode->i_ctime = CURRENT_TIME; 682 inode->i_ctime = CURRENT_TIME;
683 683
diff --git a/fs/libfs.c b/fs/libfs.c
index 49b44099dabb..4910a36f516e 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -44,7 +44,7 @@ static int simple_delete_dentry(struct dentry *dentry)
44 */ 44 */
45struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) 45struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
46{ 46{
47 static struct dentry_operations simple_dentry_operations = { 47 static const struct dentry_operations simple_dentry_operations = {
48 .d_delete = simple_delete_dentry, 48 .d_delete = simple_delete_dentry,
49 }; 49 };
50 50
@@ -242,7 +242,8 @@ int get_sb_pseudo(struct file_system_type *fs_type, char *name,
242 d_instantiate(dentry, root); 242 d_instantiate(dentry, root);
243 s->s_root = dentry; 243 s->s_root = dentry;
244 s->s_flags |= MS_ACTIVE; 244 s->s_flags |= MS_ACTIVE;
245 return simple_set_mnt(mnt, s); 245 simple_set_mnt(mnt, s);
246 return 0;
246 247
247Enomem: 248Enomem:
248 up_write(&s->s_umount); 249 up_write(&s->s_umount);
diff --git a/fs/namei.c b/fs/namei.c
index 199317642ad6..d040ce11785d 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1473,7 +1473,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
1473 error = security_inode_create(dir, dentry, mode); 1473 error = security_inode_create(dir, dentry, mode);
1474 if (error) 1474 if (error)
1475 return error; 1475 return error;
1476 DQUOT_INIT(dir); 1476 vfs_dq_init(dir);
1477 error = dir->i_op->create(dir, dentry, mode, nd); 1477 error = dir->i_op->create(dir, dentry, mode, nd);
1478 if (!error) 1478 if (!error)
1479 fsnotify_create(dir, dentry); 1479 fsnotify_create(dir, dentry);
@@ -1489,24 +1489,22 @@ int may_open(struct path *path, int acc_mode, int flag)
1489 if (!inode) 1489 if (!inode)
1490 return -ENOENT; 1490 return -ENOENT;
1491 1491
1492 if (S_ISLNK(inode->i_mode)) 1492 switch (inode->i_mode & S_IFMT) {
1493 case S_IFLNK:
1493 return -ELOOP; 1494 return -ELOOP;
1494 1495 case S_IFDIR:
1495 if (S_ISDIR(inode->i_mode) && (acc_mode & MAY_WRITE)) 1496 if (acc_mode & MAY_WRITE)
1496 return -EISDIR; 1497 return -EISDIR;
1497 1498 break;
1498 /* 1499 case S_IFBLK:
1499 * FIFO's, sockets and device files are special: they don't 1500 case S_IFCHR:
1500 * actually live on the filesystem itself, and as such you
1501 * can write to them even if the filesystem is read-only.
1502 */
1503 if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
1504 flag &= ~O_TRUNC;
1505 } else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
1506 if (path->mnt->mnt_flags & MNT_NODEV) 1501 if (path->mnt->mnt_flags & MNT_NODEV)
1507 return -EACCES; 1502 return -EACCES;
1508 1503 /*FALLTHRU*/
1504 case S_IFIFO:
1505 case S_IFSOCK:
1509 flag &= ~O_TRUNC; 1506 flag &= ~O_TRUNC;
1507 break;
1510 } 1508 }
1511 1509
1512 error = inode_permission(inode, acc_mode); 1510 error = inode_permission(inode, acc_mode);
@@ -1552,7 +1550,7 @@ int may_open(struct path *path, int acc_mode, int flag)
1552 error = security_path_truncate(path, 0, 1550 error = security_path_truncate(path, 0,
1553 ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); 1551 ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
1554 if (!error) { 1552 if (!error) {
1555 DQUOT_INIT(inode); 1553 vfs_dq_init(inode);
1556 1554
1557 error = do_truncate(dentry, 0, 1555 error = do_truncate(dentry, 0,
1558 ATTR_MTIME|ATTR_CTIME|ATTR_OPEN, 1556 ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
@@ -1563,7 +1561,7 @@ int may_open(struct path *path, int acc_mode, int flag)
1563 return error; 1561 return error;
1564 } else 1562 } else
1565 if (flag & FMODE_WRITE) 1563 if (flag & FMODE_WRITE)
1566 DQUOT_INIT(inode); 1564 vfs_dq_init(inode);
1567 1565
1568 return 0; 1566 return 0;
1569} 1567}
@@ -1946,7 +1944,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1946 if (error) 1944 if (error)
1947 return error; 1945 return error;
1948 1946
1949 DQUOT_INIT(dir); 1947 vfs_dq_init(dir);
1950 error = dir->i_op->mknod(dir, dentry, mode, dev); 1948 error = dir->i_op->mknod(dir, dentry, mode, dev);
1951 if (!error) 1949 if (!error)
1952 fsnotify_create(dir, dentry); 1950 fsnotify_create(dir, dentry);
@@ -2045,7 +2043,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
2045 if (error) 2043 if (error)
2046 return error; 2044 return error;
2047 2045
2048 DQUOT_INIT(dir); 2046 vfs_dq_init(dir);
2049 error = dir->i_op->mkdir(dir, dentry, mode); 2047 error = dir->i_op->mkdir(dir, dentry, mode);
2050 if (!error) 2048 if (!error)
2051 fsnotify_mkdir(dir, dentry); 2049 fsnotify_mkdir(dir, dentry);
@@ -2131,7 +2129,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
2131 if (!dir->i_op->rmdir) 2129 if (!dir->i_op->rmdir)
2132 return -EPERM; 2130 return -EPERM;
2133 2131
2134 DQUOT_INIT(dir); 2132 vfs_dq_init(dir);
2135 2133
2136 mutex_lock(&dentry->d_inode->i_mutex); 2134 mutex_lock(&dentry->d_inode->i_mutex);
2137 dentry_unhash(dentry); 2135 dentry_unhash(dentry);
@@ -2218,7 +2216,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
2218 if (!dir->i_op->unlink) 2216 if (!dir->i_op->unlink)
2219 return -EPERM; 2217 return -EPERM;
2220 2218
2221 DQUOT_INIT(dir); 2219 vfs_dq_init(dir);
2222 2220
2223 mutex_lock(&dentry->d_inode->i_mutex); 2221 mutex_lock(&dentry->d_inode->i_mutex);
2224 if (d_mountpoint(dentry)) 2222 if (d_mountpoint(dentry))
@@ -2329,7 +2327,7 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
2329 if (error) 2327 if (error)
2330 return error; 2328 return error;
2331 2329
2332 DQUOT_INIT(dir); 2330 vfs_dq_init(dir);
2333 error = dir->i_op->symlink(dir, dentry, oldname); 2331 error = dir->i_op->symlink(dir, dentry, oldname);
2334 if (!error) 2332 if (!error)
2335 fsnotify_create(dir, dentry); 2333 fsnotify_create(dir, dentry);
@@ -2413,7 +2411,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
2413 return error; 2411 return error;
2414 2412
2415 mutex_lock(&inode->i_mutex); 2413 mutex_lock(&inode->i_mutex);
2416 DQUOT_INIT(dir); 2414 vfs_dq_init(dir);
2417 error = dir->i_op->link(old_dentry, dir, new_dentry); 2415 error = dir->i_op->link(old_dentry, dir, new_dentry);
2418 mutex_unlock(&inode->i_mutex); 2416 mutex_unlock(&inode->i_mutex);
2419 if (!error) 2417 if (!error)
@@ -2612,8 +2610,8 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2612 if (!old_dir->i_op->rename) 2610 if (!old_dir->i_op->rename)
2613 return -EPERM; 2611 return -EPERM;
2614 2612
2615 DQUOT_INIT(old_dir); 2613 vfs_dq_init(old_dir);
2616 DQUOT_INIT(new_dir); 2614 vfs_dq_init(new_dir);
2617 2615
2618 old_name = fsnotify_oldname_init(old_dentry->d_name.name); 2616 old_name = fsnotify_oldname_init(old_dentry->d_name.name);
2619 2617
diff --git a/fs/namespace.c b/fs/namespace.c
index f0e753097353..0a42e0e96027 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -397,11 +397,10 @@ static void __mnt_unmake_readonly(struct vfsmount *mnt)
397 spin_unlock(&vfsmount_lock); 397 spin_unlock(&vfsmount_lock);
398} 398}
399 399
400int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb) 400void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
401{ 401{
402 mnt->mnt_sb = sb; 402 mnt->mnt_sb = sb;
403 mnt->mnt_root = dget(sb->s_root); 403 mnt->mnt_root = dget(sb->s_root);
404 return 0;
405} 404}
406 405
407EXPORT_SYMBOL(simple_set_mnt); 406EXPORT_SYMBOL(simple_set_mnt);
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 07e9715b8658..9c590722d87e 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -79,7 +79,7 @@ static int ncp_hash_dentry(struct dentry *, struct qstr *);
79static int ncp_compare_dentry (struct dentry *, struct qstr *, struct qstr *); 79static int ncp_compare_dentry (struct dentry *, struct qstr *, struct qstr *);
80static int ncp_delete_dentry(struct dentry *); 80static int ncp_delete_dentry(struct dentry *);
81 81
82static struct dentry_operations ncp_dentry_operations = 82static const struct dentry_operations ncp_dentry_operations =
83{ 83{
84 .d_revalidate = ncp_lookup_validate, 84 .d_revalidate = ncp_lookup_validate,
85 .d_hash = ncp_hash_dentry, 85 .d_hash = ncp_hash_dentry,
@@ -87,7 +87,7 @@ static struct dentry_operations ncp_dentry_operations =
87 .d_delete = ncp_delete_dentry, 87 .d_delete = ncp_delete_dentry,
88}; 88};
89 89
90struct dentry_operations ncp_root_dentry_operations = 90const struct dentry_operations ncp_root_dentry_operations =
91{ 91{
92 .d_hash = ncp_hash_dentry, 92 .d_hash = ncp_hash_dentry,
93 .d_compare = ncp_compare_dentry, 93 .d_compare = ncp_compare_dentry,
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 672368f865ca..78bf72fc1db3 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -899,7 +899,7 @@ static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
899 iput(inode); 899 iput(inode);
900} 900}
901 901
902struct dentry_operations nfs_dentry_operations = { 902const struct dentry_operations nfs_dentry_operations = {
903 .d_revalidate = nfs_lookup_revalidate, 903 .d_revalidate = nfs_lookup_revalidate,
904 .d_delete = nfs_dentry_delete, 904 .d_delete = nfs_dentry_delete,
905 .d_iput = nfs_dentry_iput, 905 .d_iput = nfs_dentry_iput,
@@ -967,7 +967,7 @@ out:
967#ifdef CONFIG_NFS_V4 967#ifdef CONFIG_NFS_V4
968static int nfs_open_revalidate(struct dentry *, struct nameidata *); 968static int nfs_open_revalidate(struct dentry *, struct nameidata *);
969 969
970struct dentry_operations nfs4_dentry_operations = { 970const struct dentry_operations nfs4_dentry_operations = {
971 .d_revalidate = nfs_open_revalidate, 971 .d_revalidate = nfs_open_revalidate,
972 .d_delete = nfs_dentry_delete, 972 .d_delete = nfs_dentry_delete,
973 .d_iput = nfs_dentry_iput, 973 .d_iput = nfs_dentry_iput,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 4e4d33204376..84345deab26f 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -179,7 +179,7 @@ struct nfs4_state_recovery_ops {
179 int (*recover_lock)(struct nfs4_state *, struct file_lock *); 179 int (*recover_lock)(struct nfs4_state *, struct file_lock *);
180}; 180};
181 181
182extern struct dentry_operations nfs4_dentry_operations; 182extern const struct dentry_operations nfs4_dentry_operations;
183extern const struct inode_operations nfs4_dir_inode_operations; 183extern const struct inode_operations nfs4_dir_inode_operations;
184 184
185/* inode.c */ 185/* inode.c */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index c165a6403df0..78376b6c0236 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -356,7 +356,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
356 put_write_access(inode); 356 put_write_access(inode);
357 goto out_nfserr; 357 goto out_nfserr;
358 } 358 }
359 DQUOT_INIT(inode); 359 vfs_dq_init(inode);
360 } 360 }
361 361
362 /* sanitize the mode change */ 362 /* sanitize the mode change */
@@ -723,7 +723,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
723 else 723 else
724 flags = O_WRONLY|O_LARGEFILE; 724 flags = O_WRONLY|O_LARGEFILE;
725 725
726 DQUOT_INIT(inode); 726 vfs_dq_init(inode);
727 } 727 }
728 *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt), 728 *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
729 flags, cred); 729 flags, cred);
diff --git a/fs/notify/inotify/inotify.c b/fs/notify/inotify/inotify.c
index 331f2e88e284..220c13f0d73d 100644
--- a/fs/notify/inotify/inotify.c
+++ b/fs/notify/inotify/inotify.c
@@ -380,6 +380,14 @@ void inotify_unmount_inodes(struct list_head *list)
380 struct list_head *watches; 380 struct list_head *watches;
381 381
382 /* 382 /*
383 * We cannot __iget() an inode in state I_CLEAR, I_FREEING,
384 * I_WILL_FREE, or I_NEW which is fine because by that point
385 * the inode cannot have any associated watches.
386 */
387 if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
388 continue;
389
390 /*
383 * If i_count is zero, the inode cannot have any watches and 391 * If i_count is zero, the inode cannot have any watches and
384 * doing an __iget/iput with MS_ACTIVE clear would actually 392 * doing an __iget/iput with MS_ACTIVE clear would actually
385 * evict all inodes with zero i_count from icache which is 393 * evict all inodes with zero i_count from icache which is
@@ -388,14 +396,6 @@ void inotify_unmount_inodes(struct list_head *list)
388 if (!atomic_read(&inode->i_count)) 396 if (!atomic_read(&inode->i_count))
389 continue; 397 continue;
390 398
391 /*
392 * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
393 * I_WILL_FREE which is fine because by that point the inode
394 * cannot have any associated watches.
395 */
396 if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
397 continue;
398
399 need_iput_tmp = need_iput; 399 need_iput_tmp = need_iput;
400 need_iput = NULL; 400 need_iput = NULL;
401 /* In case inotify_remove_watch_locked() drops a reference. */ 401 /* In case inotify_remove_watch_locked() drops a reference. */
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index e9d7c2038c0f..7d604480557a 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -455,7 +455,7 @@ out_move:
455 d_move(dentry, target); 455 d_move(dentry, target);
456} 456}
457 457
458struct dentry_operations ocfs2_dentry_ops = { 458const struct dentry_operations ocfs2_dentry_ops = {
459 .d_revalidate = ocfs2_dentry_revalidate, 459 .d_revalidate = ocfs2_dentry_revalidate,
460 .d_iput = ocfs2_dentry_iput, 460 .d_iput = ocfs2_dentry_iput,
461}; 461};
diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h
index d06e16c06640..faa12e75f98d 100644
--- a/fs/ocfs2/dcache.h
+++ b/fs/ocfs2/dcache.h
@@ -26,7 +26,7 @@
26#ifndef OCFS2_DCACHE_H 26#ifndef OCFS2_DCACHE_H
27#define OCFS2_DCACHE_H 27#define OCFS2_DCACHE_H
28 28
29extern struct dentry_operations ocfs2_dentry_ops; 29extern const struct dentry_operations ocfs2_dentry_ops;
30 30
31struct ocfs2_dentry_lock { 31struct ocfs2_dentry_lock {
32 /* Use count of dentry lock */ 32 /* Use count of dentry lock */
diff --git a/fs/open.c b/fs/open.c
index a3a78ceb2a2b..75b61677daaf 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -273,7 +273,7 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
273 if (!error) 273 if (!error)
274 error = security_path_truncate(&path, length, 0); 274 error = security_path_truncate(&path, length, 0);
275 if (!error) { 275 if (!error) {
276 DQUOT_INIT(inode); 276 vfs_dq_init(inode);
277 error = do_truncate(path.dentry, length, 0, NULL); 277 error = do_truncate(path.dentry, length, 0, NULL);
278 } 278 }
279 279
diff --git a/fs/pipe.c b/fs/pipe.c
index 94ad15967cf9..4af7aa521813 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -860,7 +860,7 @@ static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
860 dentry->d_inode->i_ino); 860 dentry->d_inode->i_ino);
861} 861}
862 862
863static struct dentry_operations pipefs_dentry_operations = { 863static const struct dentry_operations pipefs_dentry_operations = {
864 .d_delete = pipefs_delete_dentry, 864 .d_delete = pipefs_delete_dentry,
865 .d_dname = pipefs_dname, 865 .d_dname = pipefs_dname,
866}; 866};
@@ -1024,11 +1024,6 @@ int do_pipe_flags(int *fd, int flags)
1024 return error; 1024 return error;
1025} 1025}
1026 1026
1027int do_pipe(int *fd)
1028{
1029 return do_pipe_flags(fd, 0);
1030}
1031
1032/* 1027/*
1033 * sys_pipe() is the normal C calling standard for creating 1028 * sys_pipe() is the normal C calling standard for creating
1034 * a pipe. It's not the way Unix traditionally does this, though. 1029 * a pipe. It's not the way Unix traditionally does this, though.
diff --git a/fs/proc/base.c b/fs/proc/base.c
index beaa0ce3b82e..aef6d55b7de6 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1545,7 +1545,7 @@ static int pid_delete_dentry(struct dentry * dentry)
1545 return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first; 1545 return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
1546} 1546}
1547 1547
1548static struct dentry_operations pid_dentry_operations = 1548static const struct dentry_operations pid_dentry_operations =
1549{ 1549{
1550 .d_revalidate = pid_revalidate, 1550 .d_revalidate = pid_revalidate,
1551 .d_delete = pid_delete_dentry, 1551 .d_delete = pid_delete_dentry,
@@ -1717,7 +1717,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
1717 return 0; 1717 return 0;
1718} 1718}
1719 1719
1720static struct dentry_operations tid_fd_dentry_operations = 1720static const struct dentry_operations tid_fd_dentry_operations =
1721{ 1721{
1722 .d_revalidate = tid_fd_revalidate, 1722 .d_revalidate = tid_fd_revalidate,
1723 .d_delete = pid_delete_dentry, 1723 .d_delete = pid_delete_dentry,
@@ -2339,7 +2339,7 @@ static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
2339 return 0; 2339 return 0;
2340} 2340}
2341 2341
2342static struct dentry_operations proc_base_dentry_operations = 2342static const struct dentry_operations proc_base_dentry_operations =
2343{ 2343{
2344 .d_revalidate = proc_base_revalidate, 2344 .d_revalidate = proc_base_revalidate,
2345 .d_delete = pid_delete_dentry, 2345 .d_delete = pid_delete_dentry,
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index db7fa5cab988..5d2989e9dcc1 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -363,7 +363,7 @@ static int proc_delete_dentry(struct dentry * dentry)
363 return 1; 363 return 1;
364} 364}
365 365
366static struct dentry_operations proc_dentry_operations = 366static const struct dentry_operations proc_dentry_operations =
367{ 367{
368 .d_delete = proc_delete_dentry, 368 .d_delete = proc_delete_dentry,
369}; 369};
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 94fcfff6863a..9b1e4e9a16bf 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -7,7 +7,7 @@
7#include <linux/security.h> 7#include <linux/security.h>
8#include "internal.h" 8#include "internal.h"
9 9
10static struct dentry_operations proc_sys_dentry_operations; 10static const struct dentry_operations proc_sys_dentry_operations;
11static const struct file_operations proc_sys_file_operations; 11static const struct file_operations proc_sys_file_operations;
12static const struct inode_operations proc_sys_inode_operations; 12static const struct inode_operations proc_sys_inode_operations;
13static const struct file_operations proc_sys_dir_file_operations; 13static const struct file_operations proc_sys_dir_file_operations;
@@ -396,7 +396,7 @@ static int proc_sys_compare(struct dentry *dir, struct qstr *qstr,
396 return !sysctl_is_seen(PROC_I(dentry->d_inode)->sysctl); 396 return !sysctl_is_seen(PROC_I(dentry->d_inode)->sysctl);
397} 397}
398 398
399static struct dentry_operations proc_sys_dentry_operations = { 399static const struct dentry_operations proc_sys_dentry_operations = {
400 .d_revalidate = proc_sys_revalidate, 400 .d_revalidate = proc_sys_revalidate,
401 .d_delete = proc_sys_delete, 401 .d_delete = proc_sys_delete,
402 .d_compare = proc_sys_compare, 402 .d_compare = proc_sys_compare,
diff --git a/fs/proc/root.c b/fs/proc/root.c
index f6299a25594e..1e15a2b176e8 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -83,7 +83,8 @@ static int proc_get_sb(struct file_system_type *fs_type,
83 ns->proc_mnt = mnt; 83 ns->proc_mnt = mnt;
84 } 84 }
85 85
86 return simple_set_mnt(mnt, sb); 86 simple_set_mnt(mnt, sb);
87 return 0;
87} 88}
88 89
89static void proc_kill_sb(struct super_block *sb) 90static void proc_kill_sb(struct super_block *sb)
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
new file mode 100644
index 000000000000..8047e01ef46b
--- /dev/null
+++ b/fs/quota/Kconfig
@@ -0,0 +1,59 @@
1#
2# Quota configuration
3#
4
5config QUOTA
6 bool "Quota support"
7 help
8 If you say Y here, you will be able to set per user limits for disk
9 usage (also called disk quotas). Currently, it works for the
10 ext2, ext3, and reiserfs file system. ext3 also supports journalled
11 quotas for which you don't need to run quotacheck(8) after an unclean
12 shutdown.
13 For further details, read the Quota mini-HOWTO, available from
14 <http://www.tldp.org/docs.html#howto>, or the documentation provided
15 with the quota tools. Probably the quota support is only useful for
16 multi user systems. If unsure, say N.
17
18config QUOTA_NETLINK_INTERFACE
19 bool "Report quota messages through netlink interface"
20 depends on QUOTA && NET
21 help
22 If you say Y here, quota warnings (about exceeding softlimit, reaching
23 hardlimit, etc.) will be reported through netlink interface. If unsure,
24 say Y.
25
26config PRINT_QUOTA_WARNING
27 bool "Print quota warnings to console (OBSOLETE)"
28 depends on QUOTA
29 default y
30 help
31 If you say Y here, quota warnings (about exceeding softlimit, reaching
32 hardlimit, etc.) will be printed to the process' controlling terminal.
33 Note that this behavior is currently deprecated and may go away in
34 future. Please use notification via netlink socket instead.
35
36# Generic support for tree structured quota files. Selected when needed.
37config QUOTA_TREE
38 tristate
39
40config QFMT_V1
41 tristate "Old quota format support"
42 depends on QUOTA
43 help
44 This quota format was (is) used by kernels earlier than 2.4.22. If
45 you have quota working and you don't want to convert to new quota
46 format say Y here.
47
48config QFMT_V2
49 tristate "Quota format v2 support"
50 depends on QUOTA
51 select QUOTA_TREE
52 help
53 This quota format allows using quotas with 32-bit UIDs/GIDs. If you
54 need this functionality say Y here.
55
56config QUOTACTL
57 bool
58 depends on XFS_QUOTA || QUOTA
59 default y
diff --git a/fs/quota/Makefile b/fs/quota/Makefile
new file mode 100644
index 000000000000..385a0831cc99
--- /dev/null
+++ b/fs/quota/Makefile
@@ -0,0 +1,14 @@
1#
2# Makefile for the Linux filesystems.
3#
4# 14 Sep 2000, Christoph Hellwig <hch@infradead.org>
5# Rewritten to use lists instead of if-statements.
6#
7
8obj-y :=
9
10obj-$(CONFIG_QUOTA) += dquot.o
11obj-$(CONFIG_QFMT_V1) += quota_v1.o
12obj-$(CONFIG_QFMT_V2) += quota_v2.o
13obj-$(CONFIG_QUOTA_TREE) += quota_tree.o
14obj-$(CONFIG_QUOTACTL) += quota.o
diff --git a/fs/dquot.c b/fs/quota/dquot.c
index d6add0bf5ad3..2ca967a5ef77 100644
--- a/fs/dquot.c
+++ b/fs/quota/dquot.c
@@ -129,9 +129,10 @@
129 * i_mutex on quota files is special (it's below dqio_mutex) 129 * i_mutex on quota files is special (it's below dqio_mutex)
130 */ 130 */
131 131
132static DEFINE_SPINLOCK(dq_list_lock); 132static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
133static DEFINE_SPINLOCK(dq_state_lock); 133static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
134DEFINE_SPINLOCK(dq_data_lock); 134__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
135EXPORT_SYMBOL(dq_data_lock);
135 136
136static char *quotatypes[] = INITQFNAMES; 137static char *quotatypes[] = INITQFNAMES;
137static struct quota_format_type *quota_formats; /* List of registered formats */ 138static struct quota_format_type *quota_formats; /* List of registered formats */
@@ -148,35 +149,46 @@ int register_quota_format(struct quota_format_type *fmt)
148 spin_unlock(&dq_list_lock); 149 spin_unlock(&dq_list_lock);
149 return 0; 150 return 0;
150} 151}
152EXPORT_SYMBOL(register_quota_format);
151 153
152void unregister_quota_format(struct quota_format_type *fmt) 154void unregister_quota_format(struct quota_format_type *fmt)
153{ 155{
154 struct quota_format_type **actqf; 156 struct quota_format_type **actqf;
155 157
156 spin_lock(&dq_list_lock); 158 spin_lock(&dq_list_lock);
157 for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next); 159 for (actqf = &quota_formats; *actqf && *actqf != fmt;
160 actqf = &(*actqf)->qf_next)
161 ;
158 if (*actqf) 162 if (*actqf)
159 *actqf = (*actqf)->qf_next; 163 *actqf = (*actqf)->qf_next;
160 spin_unlock(&dq_list_lock); 164 spin_unlock(&dq_list_lock);
161} 165}
166EXPORT_SYMBOL(unregister_quota_format);
162 167
163static struct quota_format_type *find_quota_format(int id) 168static struct quota_format_type *find_quota_format(int id)
164{ 169{
165 struct quota_format_type *actqf; 170 struct quota_format_type *actqf;
166 171
167 spin_lock(&dq_list_lock); 172 spin_lock(&dq_list_lock);
168 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); 173 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
174 actqf = actqf->qf_next)
175 ;
169 if (!actqf || !try_module_get(actqf->qf_owner)) { 176 if (!actqf || !try_module_get(actqf->qf_owner)) {
170 int qm; 177 int qm;
171 178
172 spin_unlock(&dq_list_lock); 179 spin_unlock(&dq_list_lock);
173 180
174 for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++); 181 for (qm = 0; module_names[qm].qm_fmt_id &&
175 if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name)) 182 module_names[qm].qm_fmt_id != id; qm++)
183 ;
184 if (!module_names[qm].qm_fmt_id ||
185 request_module(module_names[qm].qm_mod_name))
176 return NULL; 186 return NULL;
177 187
178 spin_lock(&dq_list_lock); 188 spin_lock(&dq_list_lock);
179 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); 189 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
190 actqf = actqf->qf_next)
191 ;
180 if (actqf && !try_module_get(actqf->qf_owner)) 192 if (actqf && !try_module_get(actqf->qf_owner))
181 actqf = NULL; 193 actqf = NULL;
182 } 194 }
@@ -215,6 +227,7 @@ static unsigned int dq_hash_bits, dq_hash_mask;
215static struct hlist_head *dquot_hash; 227static struct hlist_head *dquot_hash;
216 228
217struct dqstats dqstats; 229struct dqstats dqstats;
230EXPORT_SYMBOL(dqstats);
218 231
219static inline unsigned int 232static inline unsigned int
220hashfn(const struct super_block *sb, unsigned int id, int type) 233hashfn(const struct super_block *sb, unsigned int id, int type)
@@ -230,7 +243,8 @@ hashfn(const struct super_block *sb, unsigned int id, int type)
230 */ 243 */
231static inline void insert_dquot_hash(struct dquot *dquot) 244static inline void insert_dquot_hash(struct dquot *dquot)
232{ 245{
233 struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type); 246 struct hlist_head *head;
247 head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
234 hlist_add_head(&dquot->dq_hash, head); 248 hlist_add_head(&dquot->dq_hash, head);
235} 249}
236 250
@@ -239,17 +253,19 @@ static inline void remove_dquot_hash(struct dquot *dquot)
239 hlist_del_init(&dquot->dq_hash); 253 hlist_del_init(&dquot->dq_hash);
240} 254}
241 255
242static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type) 256static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
257 unsigned int id, int type)
243{ 258{
244 struct hlist_node *node; 259 struct hlist_node *node;
245 struct dquot *dquot; 260 struct dquot *dquot;
246 261
247 hlist_for_each (node, dquot_hash+hashent) { 262 hlist_for_each (node, dquot_hash+hashent) {
248 dquot = hlist_entry(node, struct dquot, dq_hash); 263 dquot = hlist_entry(node, struct dquot, dq_hash);
249 if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type) 264 if (dquot->dq_sb == sb && dquot->dq_id == id &&
265 dquot->dq_type == type)
250 return dquot; 266 return dquot;
251 } 267 }
252 return NODQUOT; 268 return NULL;
253} 269}
254 270
255/* Add a dquot to the tail of the free list */ 271/* Add a dquot to the tail of the free list */
@@ -309,6 +325,7 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
309 spin_unlock(&dq_list_lock); 325 spin_unlock(&dq_list_lock);
310 return 0; 326 return 0;
311} 327}
328EXPORT_SYMBOL(dquot_mark_dquot_dirty);
312 329
313/* This function needs dq_list_lock */ 330/* This function needs dq_list_lock */
314static inline int clear_dquot_dirty(struct dquot *dquot) 331static inline int clear_dquot_dirty(struct dquot *dquot)
@@ -345,8 +362,10 @@ int dquot_acquire(struct dquot *dquot)
345 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) { 362 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
346 ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); 363 ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
347 /* Write the info if needed */ 364 /* Write the info if needed */
348 if (info_dirty(&dqopt->info[dquot->dq_type])) 365 if (info_dirty(&dqopt->info[dquot->dq_type])) {
349 ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); 366 ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
367 dquot->dq_sb, dquot->dq_type);
368 }
350 if (ret < 0) 369 if (ret < 0)
351 goto out_iolock; 370 goto out_iolock;
352 if (ret2 < 0) { 371 if (ret2 < 0) {
@@ -360,6 +379,7 @@ out_iolock:
360 mutex_unlock(&dquot->dq_lock); 379 mutex_unlock(&dquot->dq_lock);
361 return ret; 380 return ret;
362} 381}
382EXPORT_SYMBOL(dquot_acquire);
363 383
364/* 384/*
365 * Write dquot to disk 385 * Write dquot to disk
@@ -380,8 +400,10 @@ int dquot_commit(struct dquot *dquot)
380 * => we have better not writing it */ 400 * => we have better not writing it */
381 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { 401 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
382 ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); 402 ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
383 if (info_dirty(&dqopt->info[dquot->dq_type])) 403 if (info_dirty(&dqopt->info[dquot->dq_type])) {
384 ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); 404 ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
405 dquot->dq_sb, dquot->dq_type);
406 }
385 if (ret >= 0) 407 if (ret >= 0)
386 ret = ret2; 408 ret = ret2;
387 } 409 }
@@ -389,6 +411,7 @@ out_sem:
389 mutex_unlock(&dqopt->dqio_mutex); 411 mutex_unlock(&dqopt->dqio_mutex);
390 return ret; 412 return ret;
391} 413}
414EXPORT_SYMBOL(dquot_commit);
392 415
393/* 416/*
394 * Release dquot 417 * Release dquot
@@ -406,8 +429,10 @@ int dquot_release(struct dquot *dquot)
406 if (dqopt->ops[dquot->dq_type]->release_dqblk) { 429 if (dqopt->ops[dquot->dq_type]->release_dqblk) {
407 ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); 430 ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
408 /* Write the info */ 431 /* Write the info */
409 if (info_dirty(&dqopt->info[dquot->dq_type])) 432 if (info_dirty(&dqopt->info[dquot->dq_type])) {
410 ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); 433 ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
434 dquot->dq_sb, dquot->dq_type);
435 }
411 if (ret >= 0) 436 if (ret >= 0)
412 ret = ret2; 437 ret = ret2;
413 } 438 }
@@ -417,6 +442,7 @@ out_dqlock:
417 mutex_unlock(&dquot->dq_lock); 442 mutex_unlock(&dquot->dq_lock);
418 return ret; 443 return ret;
419} 444}
445EXPORT_SYMBOL(dquot_release);
420 446
421void dquot_destroy(struct dquot *dquot) 447void dquot_destroy(struct dquot *dquot)
422{ 448{
@@ -516,6 +542,7 @@ out:
516 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); 542 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
517 return ret; 543 return ret;
518} 544}
545EXPORT_SYMBOL(dquot_scan_active);
519 546
520int vfs_quota_sync(struct super_block *sb, int type) 547int vfs_quota_sync(struct super_block *sb, int type)
521{ 548{
@@ -533,7 +560,8 @@ int vfs_quota_sync(struct super_block *sb, int type)
533 spin_lock(&dq_list_lock); 560 spin_lock(&dq_list_lock);
534 dirty = &dqopt->info[cnt].dqi_dirty_list; 561 dirty = &dqopt->info[cnt].dqi_dirty_list;
535 while (!list_empty(dirty)) { 562 while (!list_empty(dirty)) {
536 dquot = list_first_entry(dirty, struct dquot, dq_dirty); 563 dquot = list_first_entry(dirty, struct dquot,
564 dq_dirty);
537 /* Dirty and inactive can be only bad dquot... */ 565 /* Dirty and inactive can be only bad dquot... */
538 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { 566 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
539 clear_dquot_dirty(dquot); 567 clear_dquot_dirty(dquot);
@@ -563,6 +591,7 @@ int vfs_quota_sync(struct super_block *sb, int type)
563 591
564 return 0; 592 return 0;
565} 593}
594EXPORT_SYMBOL(vfs_quota_sync);
566 595
567/* Free unused dquots from cache */ 596/* Free unused dquots from cache */
568static void prune_dqcache(int count) 597static void prune_dqcache(int count)
@@ -672,6 +701,7 @@ we_slept:
672 put_dquot_last(dquot); 701 put_dquot_last(dquot);
673 spin_unlock(&dq_list_lock); 702 spin_unlock(&dq_list_lock);
674} 703}
704EXPORT_SYMBOL(dqput);
675 705
676struct dquot *dquot_alloc(struct super_block *sb, int type) 706struct dquot *dquot_alloc(struct super_block *sb, int type)
677{ 707{
@@ -685,7 +715,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
685 715
686 dquot = sb->dq_op->alloc_dquot(sb, type); 716 dquot = sb->dq_op->alloc_dquot(sb, type);
687 if(!dquot) 717 if(!dquot)
688 return NODQUOT; 718 return NULL;
689 719
690 mutex_init(&dquot->dq_lock); 720 mutex_init(&dquot->dq_lock);
691 INIT_LIST_HEAD(&dquot->dq_free); 721 INIT_LIST_HEAD(&dquot->dq_free);
@@ -711,10 +741,10 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
711struct dquot *dqget(struct super_block *sb, unsigned int id, int type) 741struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
712{ 742{
713 unsigned int hashent = hashfn(sb, id, type); 743 unsigned int hashent = hashfn(sb, id, type);
714 struct dquot *dquot = NODQUOT, *empty = NODQUOT; 744 struct dquot *dquot = NULL, *empty = NULL;
715 745
716 if (!sb_has_quota_active(sb, type)) 746 if (!sb_has_quota_active(sb, type))
717 return NODQUOT; 747 return NULL;
718we_slept: 748we_slept:
719 spin_lock(&dq_list_lock); 749 spin_lock(&dq_list_lock);
720 spin_lock(&dq_state_lock); 750 spin_lock(&dq_state_lock);
@@ -725,15 +755,17 @@ we_slept:
725 } 755 }
726 spin_unlock(&dq_state_lock); 756 spin_unlock(&dq_state_lock);
727 757
728 if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) { 758 dquot = find_dquot(hashent, sb, id, type);
729 if (empty == NODQUOT) { 759 if (!dquot) {
760 if (!empty) {
730 spin_unlock(&dq_list_lock); 761 spin_unlock(&dq_list_lock);
731 if ((empty = get_empty_dquot(sb, type)) == NODQUOT) 762 empty = get_empty_dquot(sb, type);
763 if (!empty)
732 schedule(); /* Try to wait for a moment... */ 764 schedule(); /* Try to wait for a moment... */
733 goto we_slept; 765 goto we_slept;
734 } 766 }
735 dquot = empty; 767 dquot = empty;
736 empty = NODQUOT; 768 empty = NULL;
737 dquot->dq_id = id; 769 dquot->dq_id = id;
738 /* all dquots go on the inuse_list */ 770 /* all dquots go on the inuse_list */
739 put_inuse(dquot); 771 put_inuse(dquot);
@@ -749,13 +781,14 @@ we_slept:
749 dqstats.lookups++; 781 dqstats.lookups++;
750 spin_unlock(&dq_list_lock); 782 spin_unlock(&dq_list_lock);
751 } 783 }
752 /* Wait for dq_lock - after this we know that either dquot_release() is already 784 /* Wait for dq_lock - after this we know that either dquot_release() is
753 * finished or it will be canceled due to dq_count > 1 test */ 785 * already finished or it will be canceled due to dq_count > 1 test */
754 wait_on_dquot(dquot); 786 wait_on_dquot(dquot);
755 /* Read the dquot and instantiate it (everything done only if needed) */ 787 /* Read the dquot / allocate space in quota file */
756 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) { 788 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
789 sb->dq_op->acquire_dquot(dquot) < 0) {
757 dqput(dquot); 790 dqput(dquot);
758 dquot = NODQUOT; 791 dquot = NULL;
759 goto out; 792 goto out;
760 } 793 }
761#ifdef __DQUOT_PARANOIA 794#ifdef __DQUOT_PARANOIA
@@ -767,6 +800,7 @@ out:
767 800
768 return dquot; 801 return dquot;
769} 802}
803EXPORT_SYMBOL(dqget);
770 804
771static int dqinit_needed(struct inode *inode, int type) 805static int dqinit_needed(struct inode *inode, int type)
772{ 806{
@@ -775,9 +809,9 @@ static int dqinit_needed(struct inode *inode, int type)
775 if (IS_NOQUOTA(inode)) 809 if (IS_NOQUOTA(inode))
776 return 0; 810 return 0;
777 if (type != -1) 811 if (type != -1)
778 return inode->i_dquot[type] == NODQUOT; 812 return !inode->i_dquot[type];
779 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 813 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
780 if (inode->i_dquot[cnt] == NODQUOT) 814 if (!inode->i_dquot[cnt])
781 return 1; 815 return 1;
782 return 0; 816 return 0;
783} 817}
@@ -789,12 +823,12 @@ static void add_dquot_ref(struct super_block *sb, int type)
789 823
790 spin_lock(&inode_lock); 824 spin_lock(&inode_lock);
791 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 825 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
826 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
827 continue;
792 if (!atomic_read(&inode->i_writecount)) 828 if (!atomic_read(&inode->i_writecount))
793 continue; 829 continue;
794 if (!dqinit_needed(inode, type)) 830 if (!dqinit_needed(inode, type))
795 continue; 831 continue;
796 if (inode->i_state & (I_FREEING|I_WILL_FREE))
797 continue;
798 832
799 __iget(inode); 833 __iget(inode);
800 spin_unlock(&inode_lock); 834 spin_unlock(&inode_lock);
@@ -813,7 +847,10 @@ static void add_dquot_ref(struct super_block *sb, int type)
813 iput(old_inode); 847 iput(old_inode);
814} 848}
815 849
816/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */ 850/*
851 * Return 0 if dqput() won't block.
852 * (note that 1 doesn't necessarily mean blocking)
853 */
817static inline int dqput_blocks(struct dquot *dquot) 854static inline int dqput_blocks(struct dquot *dquot)
818{ 855{
819 if (atomic_read(&dquot->dq_count) <= 1) 856 if (atomic_read(&dquot->dq_count) <= 1)
@@ -821,22 +858,27 @@ static inline int dqput_blocks(struct dquot *dquot)
821 return 0; 858 return 0;
822} 859}
823 860
824/* Remove references to dquots from inode - add dquot to list for freeing if needed */ 861/*
825/* We can't race with anybody because we hold dqptr_sem for writing... */ 862 * Remove references to dquots from inode and add dquot to list for freeing
863 * if we have the last referece to dquot
864 * We can't race with anybody because we hold dqptr_sem for writing...
865 */
826static int remove_inode_dquot_ref(struct inode *inode, int type, 866static int remove_inode_dquot_ref(struct inode *inode, int type,
827 struct list_head *tofree_head) 867 struct list_head *tofree_head)
828{ 868{
829 struct dquot *dquot = inode->i_dquot[type]; 869 struct dquot *dquot = inode->i_dquot[type];
830 870
831 inode->i_dquot[type] = NODQUOT; 871 inode->i_dquot[type] = NULL;
832 if (dquot != NODQUOT) { 872 if (dquot) {
833 if (dqput_blocks(dquot)) { 873 if (dqput_blocks(dquot)) {
834#ifdef __DQUOT_PARANOIA 874#ifdef __DQUOT_PARANOIA
835 if (atomic_read(&dquot->dq_count) != 1) 875 if (atomic_read(&dquot->dq_count) != 1)
836 printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count)); 876 printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
837#endif 877#endif
838 spin_lock(&dq_list_lock); 878 spin_lock(&dq_list_lock);
839 list_add(&dquot->dq_free, tofree_head); /* As dquot must have currently users it can't be on the free list... */ 879 /* As dquot must have currently users it can't be on
880 * the free list... */
881 list_add(&dquot->dq_free, tofree_head);
840 spin_unlock(&dq_list_lock); 882 spin_unlock(&dq_list_lock);
841 return 1; 883 return 1;
842 } 884 }
@@ -846,19 +888,22 @@ static int remove_inode_dquot_ref(struct inode *inode, int type,
846 return 0; 888 return 0;
847} 889}
848 890
849/* Free list of dquots - called from inode.c */ 891/*
850/* dquots are removed from inodes, no new references can be got so we are the only ones holding reference */ 892 * Free list of dquots
893 * Dquots are removed from inodes and no new references can be got so we are
894 * the only ones holding reference
895 */
851static void put_dquot_list(struct list_head *tofree_head) 896static void put_dquot_list(struct list_head *tofree_head)
852{ 897{
853 struct list_head *act_head; 898 struct list_head *act_head;
854 struct dquot *dquot; 899 struct dquot *dquot;
855 900
856 act_head = tofree_head->next; 901 act_head = tofree_head->next;
857 /* So now we have dquots on the list... Just free them */
858 while (act_head != tofree_head) { 902 while (act_head != tofree_head) {
859 dquot = list_entry(act_head, struct dquot, dq_free); 903 dquot = list_entry(act_head, struct dquot, dq_free);
860 act_head = act_head->next; 904 act_head = act_head->next;
861 list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... */ 905 /* Remove dquot from the list so we won't have problems... */
906 list_del_init(&dquot->dq_free);
862 dqput(dquot); 907 dqput(dquot);
863 } 908 }
864} 909}
@@ -870,6 +915,12 @@ static void remove_dquot_ref(struct super_block *sb, int type,
870 915
871 spin_lock(&inode_lock); 916 spin_lock(&inode_lock);
872 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 917 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
918 /*
919 * We have to scan also I_NEW inodes because they can already
920 * have quota pointer initialized. Luckily, we need to touch
921 * only quota pointers and these have separate locking
922 * (dqptr_sem).
923 */
873 if (!IS_NOQUOTA(inode)) 924 if (!IS_NOQUOTA(inode))
874 remove_inode_dquot_ref(inode, type, tofree_head); 925 remove_inode_dquot_ref(inode, type, tofree_head);
875 } 926 }
@@ -899,7 +950,29 @@ static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
899 dquot->dq_dqb.dqb_curspace += number; 950 dquot->dq_dqb.dqb_curspace += number;
900} 951}
901 952
902static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) 953static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
954{
955 dquot->dq_dqb.dqb_rsvspace += number;
956}
957
958/*
959 * Claim reserved quota space
960 */
961static void dquot_claim_reserved_space(struct dquot *dquot,
962 qsize_t number)
963{
964 WARN_ON(dquot->dq_dqb.dqb_rsvspace < number);
965 dquot->dq_dqb.dqb_curspace += number;
966 dquot->dq_dqb.dqb_rsvspace -= number;
967}
968
969static inline
970void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
971{
972 dquot->dq_dqb.dqb_rsvspace -= number;
973}
974
975static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
903{ 976{
904 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || 977 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
905 dquot->dq_dqb.dqb_curinodes >= number) 978 dquot->dq_dqb.dqb_curinodes >= number)
@@ -911,7 +984,7 @@ static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
911 clear_bit(DQ_INODES_B, &dquot->dq_flags); 984 clear_bit(DQ_INODES_B, &dquot->dq_flags);
912} 985}
913 986
914static inline void dquot_decr_space(struct dquot *dquot, qsize_t number) 987static void dquot_decr_space(struct dquot *dquot, qsize_t number)
915{ 988{
916 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || 989 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
917 dquot->dq_dqb.dqb_curspace >= number) 990 dquot->dq_dqb.dqb_curspace >= number)
@@ -938,7 +1011,7 @@ static int warning_issued(struct dquot *dquot, const int warntype)
938#ifdef CONFIG_PRINT_QUOTA_WARNING 1011#ifdef CONFIG_PRINT_QUOTA_WARNING
939static int flag_print_warnings = 1; 1012static int flag_print_warnings = 1;
940 1013
941static inline int need_print_warning(struct dquot *dquot) 1014static int need_print_warning(struct dquot *dquot)
942{ 1015{
943 if (!flag_print_warnings) 1016 if (!flag_print_warnings)
944 return 0; 1017 return 0;
@@ -1065,13 +1138,17 @@ err_out:
1065 kfree_skb(skb); 1138 kfree_skb(skb);
1066} 1139}
1067#endif 1140#endif
1068 1141/*
1069static inline void flush_warnings(struct dquot * const *dquots, char *warntype) 1142 * Write warnings to the console and send warning messages over netlink.
1143 *
1144 * Note that this function can sleep.
1145 */
1146static void flush_warnings(struct dquot *const *dquots, char *warntype)
1070{ 1147{
1071 int i; 1148 int i;
1072 1149
1073 for (i = 0; i < MAXQUOTAS; i++) 1150 for (i = 0; i < MAXQUOTAS; i++)
1074 if (dquots[i] != NODQUOT && warntype[i] != QUOTA_NL_NOWARN && 1151 if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN &&
1075 !warning_issued(dquots[i], warntype[i])) { 1152 !warning_issued(dquots[i], warntype[i])) {
1076#ifdef CONFIG_PRINT_QUOTA_WARNING 1153#ifdef CONFIG_PRINT_QUOTA_WARNING
1077 print_warning(dquots[i], warntype[i]); 1154 print_warning(dquots[i], warntype[i]);
@@ -1082,42 +1159,47 @@ static inline void flush_warnings(struct dquot * const *dquots, char *warntype)
1082 } 1159 }
1083} 1160}
1084 1161
1085static inline char ignore_hardlimit(struct dquot *dquot) 1162static int ignore_hardlimit(struct dquot *dquot)
1086{ 1163{
1087 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; 1164 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
1088 1165
1089 return capable(CAP_SYS_RESOURCE) && 1166 return capable(CAP_SYS_RESOURCE) &&
1090 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH)); 1167 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
1168 !(info->dqi_flags & V1_DQF_RSQUASH));
1091} 1169}
1092 1170
1093/* needs dq_data_lock */ 1171/* needs dq_data_lock */
1094static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) 1172static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
1095{ 1173{
1174 qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
1175
1096 *warntype = QUOTA_NL_NOWARN; 1176 *warntype = QUOTA_NL_NOWARN;
1097 if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || 1177 if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
1098 test_bit(DQ_FAKE_B, &dquot->dq_flags)) 1178 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1099 return QUOTA_OK; 1179 return QUOTA_OK;
1100 1180
1101 if (dquot->dq_dqb.dqb_ihardlimit && 1181 if (dquot->dq_dqb.dqb_ihardlimit &&
1102 (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit && 1182 newinodes > dquot->dq_dqb.dqb_ihardlimit &&
1103 !ignore_hardlimit(dquot)) { 1183 !ignore_hardlimit(dquot)) {
1104 *warntype = QUOTA_NL_IHARDWARN; 1184 *warntype = QUOTA_NL_IHARDWARN;
1105 return NO_QUOTA; 1185 return NO_QUOTA;
1106 } 1186 }
1107 1187
1108 if (dquot->dq_dqb.dqb_isoftlimit && 1188 if (dquot->dq_dqb.dqb_isoftlimit &&
1109 (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && 1189 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1110 dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime && 1190 dquot->dq_dqb.dqb_itime &&
1191 get_seconds() >= dquot->dq_dqb.dqb_itime &&
1111 !ignore_hardlimit(dquot)) { 1192 !ignore_hardlimit(dquot)) {
1112 *warntype = QUOTA_NL_ISOFTLONGWARN; 1193 *warntype = QUOTA_NL_ISOFTLONGWARN;
1113 return NO_QUOTA; 1194 return NO_QUOTA;
1114 } 1195 }
1115 1196
1116 if (dquot->dq_dqb.dqb_isoftlimit && 1197 if (dquot->dq_dqb.dqb_isoftlimit &&
1117 (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && 1198 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1118 dquot->dq_dqb.dqb_itime == 0) { 1199 dquot->dq_dqb.dqb_itime == 0) {
1119 *warntype = QUOTA_NL_ISOFTWARN; 1200 *warntype = QUOTA_NL_ISOFTWARN;
1120 dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; 1201 dquot->dq_dqb.dqb_itime = get_seconds() +
1202 sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
1121 } 1203 }
1122 1204
1123 return QUOTA_OK; 1205 return QUOTA_OK;
@@ -1126,13 +1208,19 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
1126/* needs dq_data_lock */ 1208/* needs dq_data_lock */
1127static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype) 1209static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
1128{ 1210{
1211 qsize_t tspace;
1212 struct super_block *sb = dquot->dq_sb;
1213
1129 *warntype = QUOTA_NL_NOWARN; 1214 *warntype = QUOTA_NL_NOWARN;
1130 if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || 1215 if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
1131 test_bit(DQ_FAKE_B, &dquot->dq_flags)) 1216 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1132 return QUOTA_OK; 1217 return QUOTA_OK;
1133 1218
1219 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
1220 + space;
1221
1134 if (dquot->dq_dqb.dqb_bhardlimit && 1222 if (dquot->dq_dqb.dqb_bhardlimit &&
1135 dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bhardlimit && 1223 tspace > dquot->dq_dqb.dqb_bhardlimit &&
1136 !ignore_hardlimit(dquot)) { 1224 !ignore_hardlimit(dquot)) {
1137 if (!prealloc) 1225 if (!prealloc)
1138 *warntype = QUOTA_NL_BHARDWARN; 1226 *warntype = QUOTA_NL_BHARDWARN;
@@ -1140,8 +1228,9 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
1140 } 1228 }
1141 1229
1142 if (dquot->dq_dqb.dqb_bsoftlimit && 1230 if (dquot->dq_dqb.dqb_bsoftlimit &&
1143 dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit && 1231 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1144 dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime && 1232 dquot->dq_dqb.dqb_btime &&
1233 get_seconds() >= dquot->dq_dqb.dqb_btime &&
1145 !ignore_hardlimit(dquot)) { 1234 !ignore_hardlimit(dquot)) {
1146 if (!prealloc) 1235 if (!prealloc)
1147 *warntype = QUOTA_NL_BSOFTLONGWARN; 1236 *warntype = QUOTA_NL_BSOFTLONGWARN;
@@ -1149,11 +1238,12 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
1149 } 1238 }
1150 1239
1151 if (dquot->dq_dqb.dqb_bsoftlimit && 1240 if (dquot->dq_dqb.dqb_bsoftlimit &&
1152 dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit && 1241 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1153 dquot->dq_dqb.dqb_btime == 0) { 1242 dquot->dq_dqb.dqb_btime == 0) {
1154 if (!prealloc) { 1243 if (!prealloc) {
1155 *warntype = QUOTA_NL_BSOFTWARN; 1244 *warntype = QUOTA_NL_BSOFTWARN;
1156 dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace; 1245 dquot->dq_dqb.dqb_btime = get_seconds() +
1246 sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace;
1157 } 1247 }
1158 else 1248 else
1159 /* 1249 /*
@@ -1168,15 +1258,18 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
1168 1258
1169static int info_idq_free(struct dquot *dquot, qsize_t inodes) 1259static int info_idq_free(struct dquot *dquot, qsize_t inodes)
1170{ 1260{
1261 qsize_t newinodes;
1262
1171 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || 1263 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1172 dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit || 1264 dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
1173 !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type)) 1265 !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type))
1174 return QUOTA_NL_NOWARN; 1266 return QUOTA_NL_NOWARN;
1175 1267
1176 if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit) 1268 newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
1269 if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
1177 return QUOTA_NL_ISOFTBELOW; 1270 return QUOTA_NL_ISOFTBELOW;
1178 if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit && 1271 if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1179 dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit) 1272 newinodes < dquot->dq_dqb.dqb_ihardlimit)
1180 return QUOTA_NL_IHARDBELOW; 1273 return QUOTA_NL_IHARDBELOW;
1181 return QUOTA_NL_NOWARN; 1274 return QUOTA_NL_NOWARN;
1182} 1275}
@@ -1203,7 +1296,7 @@ int dquot_initialize(struct inode *inode, int type)
1203{ 1296{
1204 unsigned int id = 0; 1297 unsigned int id = 0;
1205 int cnt, ret = 0; 1298 int cnt, ret = 0;
1206 struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT }; 1299 struct dquot *got[MAXQUOTAS] = { NULL, NULL };
1207 struct super_block *sb = inode->i_sb; 1300 struct super_block *sb = inode->i_sb;
1208 1301
1209 /* First test before acquiring mutex - solves deadlocks when we 1302 /* First test before acquiring mutex - solves deadlocks when we
@@ -1236,9 +1329,9 @@ int dquot_initialize(struct inode *inode, int type)
1236 /* Avoid races with quotaoff() */ 1329 /* Avoid races with quotaoff() */
1237 if (!sb_has_quota_active(sb, cnt)) 1330 if (!sb_has_quota_active(sb, cnt))
1238 continue; 1331 continue;
1239 if (inode->i_dquot[cnt] == NODQUOT) { 1332 if (!inode->i_dquot[cnt]) {
1240 inode->i_dquot[cnt] = got[cnt]; 1333 inode->i_dquot[cnt] = got[cnt];
1241 got[cnt] = NODQUOT; 1334 got[cnt] = NULL;
1242 } 1335 }
1243 } 1336 }
1244out_err: 1337out_err:
@@ -1248,6 +1341,7 @@ out_err:
1248 dqput(got[cnt]); 1341 dqput(got[cnt]);
1249 return ret; 1342 return ret;
1250} 1343}
1344EXPORT_SYMBOL(dquot_initialize);
1251 1345
1252/* 1346/*
1253 * Release all quotas referenced by inode 1347 * Release all quotas referenced by inode
@@ -1260,7 +1354,7 @@ int dquot_drop(struct inode *inode)
1260 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1354 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1261 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1355 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1262 put[cnt] = inode->i_dquot[cnt]; 1356 put[cnt] = inode->i_dquot[cnt];
1263 inode->i_dquot[cnt] = NODQUOT; 1357 inode->i_dquot[cnt] = NULL;
1264 } 1358 }
1265 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1359 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1266 1360
@@ -1268,6 +1362,7 @@ int dquot_drop(struct inode *inode)
1268 dqput(put[cnt]); 1362 dqput(put[cnt]);
1269 return 0; 1363 return 0;
1270} 1364}
1365EXPORT_SYMBOL(dquot_drop);
1271 1366
1272/* Wrapper to remove references to quota structures from inode */ 1367/* Wrapper to remove references to quota structures from inode */
1273void vfs_dq_drop(struct inode *inode) 1368void vfs_dq_drop(struct inode *inode)
@@ -1284,12 +1379,13 @@ void vfs_dq_drop(struct inode *inode)
1284 * must assure that nobody can come after the DQUOT_DROP and 1379 * must assure that nobody can come after the DQUOT_DROP and
1285 * add quota pointers back anyway */ 1380 * add quota pointers back anyway */
1286 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1381 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1287 if (inode->i_dquot[cnt] != NODQUOT) 1382 if (inode->i_dquot[cnt])
1288 break; 1383 break;
1289 if (cnt < MAXQUOTAS) 1384 if (cnt < MAXQUOTAS)
1290 inode->i_sb->dq_op->drop(inode); 1385 inode->i_sb->dq_op->drop(inode);
1291 } 1386 }
1292} 1387}
1388EXPORT_SYMBOL(vfs_dq_drop);
1293 1389
1294/* 1390/*
1295 * Following four functions update i_blocks+i_bytes fields and 1391 * Following four functions update i_blocks+i_bytes fields and
@@ -1303,51 +1399,93 @@ void vfs_dq_drop(struct inode *inode)
1303/* 1399/*
1304 * This operation can block, but only after everything is updated 1400 * This operation can block, but only after everything is updated
1305 */ 1401 */
1306int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) 1402int __dquot_alloc_space(struct inode *inode, qsize_t number,
1403 int warn, int reserve)
1307{ 1404{
1308 int cnt, ret = NO_QUOTA; 1405 int cnt, ret = QUOTA_OK;
1309 char warntype[MAXQUOTAS]; 1406 char warntype[MAXQUOTAS];
1310 1407
1311 /* First test before acquiring mutex - solves deadlocks when we
1312 * re-enter the quota code and are already holding the mutex */
1313 if (IS_NOQUOTA(inode)) {
1314out_add:
1315 inode_add_bytes(inode, number);
1316 return QUOTA_OK;
1317 }
1318 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1408 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1319 warntype[cnt] = QUOTA_NL_NOWARN; 1409 warntype[cnt] = QUOTA_NL_NOWARN;
1320 1410
1321 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1322 if (IS_NOQUOTA(inode)) { /* Now we can do reliable test... */
1323 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1324 goto out_add;
1325 }
1326 spin_lock(&dq_data_lock); 1411 spin_lock(&dq_data_lock);
1327 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1412 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1328 if (inode->i_dquot[cnt] == NODQUOT) 1413 if (!inode->i_dquot[cnt])
1329 continue; 1414 continue;
1330 if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA) 1415 if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
1331 goto warn_put_all; 1416 == NO_QUOTA) {
1417 ret = NO_QUOTA;
1418 goto out_unlock;
1419 }
1332 } 1420 }
1333 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1421 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1334 if (inode->i_dquot[cnt] == NODQUOT) 1422 if (!inode->i_dquot[cnt])
1335 continue; 1423 continue;
1336 dquot_incr_space(inode->i_dquot[cnt], number); 1424 if (reserve)
1425 dquot_resv_space(inode->i_dquot[cnt], number);
1426 else
1427 dquot_incr_space(inode->i_dquot[cnt], number);
1337 } 1428 }
1338 inode_add_bytes(inode, number); 1429 if (!reserve)
1339 ret = QUOTA_OK; 1430 inode_add_bytes(inode, number);
1340warn_put_all: 1431out_unlock:
1341 spin_unlock(&dq_data_lock); 1432 spin_unlock(&dq_data_lock);
1342 if (ret == QUOTA_OK)
1343 /* Dirtify all the dquots - this can block when journalling */
1344 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1345 if (inode->i_dquot[cnt])
1346 mark_dquot_dirty(inode->i_dquot[cnt]);
1347 flush_warnings(inode->i_dquot, warntype); 1433 flush_warnings(inode->i_dquot, warntype);
1434 return ret;
1435}
1436
1437int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
1438{
1439 int cnt, ret = QUOTA_OK;
1440
1441 /*
1442 * First test before acquiring mutex - solves deadlocks when we
1443 * re-enter the quota code and are already holding the mutex
1444 */
1445 if (IS_NOQUOTA(inode)) {
1446 inode_add_bytes(inode, number);
1447 goto out;
1448 }
1449
1450 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1451 if (IS_NOQUOTA(inode)) {
1452 inode_add_bytes(inode, number);
1453 goto out_unlock;
1454 }
1455
1456 ret = __dquot_alloc_space(inode, number, warn, 0);
1457 if (ret == NO_QUOTA)
1458 goto out_unlock;
1459
1460 /* Dirtify all the dquots - this can block when journalling */
1461 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1462 if (inode->i_dquot[cnt])
1463 mark_dquot_dirty(inode->i_dquot[cnt]);
1464out_unlock:
1348 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1465 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1466out:
1349 return ret; 1467 return ret;
1350} 1468}
1469EXPORT_SYMBOL(dquot_alloc_space);
1470
1471int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
1472{
1473 int ret = QUOTA_OK;
1474
1475 if (IS_NOQUOTA(inode))
1476 goto out;
1477
1478 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1479 if (IS_NOQUOTA(inode))
1480 goto out_unlock;
1481
1482 ret = __dquot_alloc_space(inode, number, warn, 1);
1483out_unlock:
1484 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1485out:
1486 return ret;
1487}
1488EXPORT_SYMBOL(dquot_reserve_space);
1351 1489
1352/* 1490/*
1353 * This operation can block, but only after everything is updated 1491 * This operation can block, but only after everything is updated
@@ -1370,14 +1508,15 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number)
1370 } 1508 }
1371 spin_lock(&dq_data_lock); 1509 spin_lock(&dq_data_lock);
1372 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1510 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1373 if (inode->i_dquot[cnt] == NODQUOT) 1511 if (!inode->i_dquot[cnt])
1374 continue; 1512 continue;
1375 if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA) 1513 if (check_idq(inode->i_dquot[cnt], number, warntype+cnt)
1514 == NO_QUOTA)
1376 goto warn_put_all; 1515 goto warn_put_all;
1377 } 1516 }
1378 1517
1379 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1518 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1380 if (inode->i_dquot[cnt] == NODQUOT) 1519 if (!inode->i_dquot[cnt])
1381 continue; 1520 continue;
1382 dquot_incr_inodes(inode->i_dquot[cnt], number); 1521 dquot_incr_inodes(inode->i_dquot[cnt], number);
1383 } 1522 }
@@ -1393,6 +1532,73 @@ warn_put_all:
1393 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1532 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1394 return ret; 1533 return ret;
1395} 1534}
1535EXPORT_SYMBOL(dquot_alloc_inode);
1536
1537int dquot_claim_space(struct inode *inode, qsize_t number)
1538{
1539 int cnt;
1540 int ret = QUOTA_OK;
1541
1542 if (IS_NOQUOTA(inode)) {
1543 inode_add_bytes(inode, number);
1544 goto out;
1545 }
1546
1547 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1548 if (IS_NOQUOTA(inode)) {
1549 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1550 inode_add_bytes(inode, number);
1551 goto out;
1552 }
1553
1554 spin_lock(&dq_data_lock);
1555 /* Claim reserved quotas to allocated quotas */
1556 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1557 if (inode->i_dquot[cnt])
1558 dquot_claim_reserved_space(inode->i_dquot[cnt],
1559 number);
1560 }
1561 /* Update inode bytes */
1562 inode_add_bytes(inode, number);
1563 spin_unlock(&dq_data_lock);
1564 /* Dirtify all the dquots - this can block when journalling */
1565 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1566 if (inode->i_dquot[cnt])
1567 mark_dquot_dirty(inode->i_dquot[cnt]);
1568 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1569out:
1570 return ret;
1571}
1572EXPORT_SYMBOL(dquot_claim_space);
1573
1574/*
1575 * Release reserved quota space
1576 */
1577void dquot_release_reserved_space(struct inode *inode, qsize_t number)
1578{
1579 int cnt;
1580
1581 if (IS_NOQUOTA(inode))
1582 goto out;
1583
1584 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1585 if (IS_NOQUOTA(inode))
1586 goto out_unlock;
1587
1588 spin_lock(&dq_data_lock);
1589 /* Release reserved dquots */
1590 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1591 if (inode->i_dquot[cnt])
1592 dquot_free_reserved_space(inode->i_dquot[cnt], number);
1593 }
1594 spin_unlock(&dq_data_lock);
1595
1596out_unlock:
1597 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1598out:
1599 return;
1600}
1601EXPORT_SYMBOL(dquot_release_reserved_space);
1396 1602
1397/* 1603/*
1398 * This operation can block, but only after everything is updated 1604 * This operation can block, but only after everything is updated
@@ -1418,7 +1624,7 @@ out_sub:
1418 } 1624 }
1419 spin_lock(&dq_data_lock); 1625 spin_lock(&dq_data_lock);
1420 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1626 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1421 if (inode->i_dquot[cnt] == NODQUOT) 1627 if (!inode->i_dquot[cnt])
1422 continue; 1628 continue;
1423 warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); 1629 warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
1424 dquot_decr_space(inode->i_dquot[cnt], number); 1630 dquot_decr_space(inode->i_dquot[cnt], number);
@@ -1433,6 +1639,7 @@ out_sub:
1433 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1639 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1434 return QUOTA_OK; 1640 return QUOTA_OK;
1435} 1641}
1642EXPORT_SYMBOL(dquot_free_space);
1436 1643
1437/* 1644/*
1438 * This operation can block, but only after everything is updated 1645 * This operation can block, but only after everything is updated
@@ -1455,7 +1662,7 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
1455 } 1662 }
1456 spin_lock(&dq_data_lock); 1663 spin_lock(&dq_data_lock);
1457 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1664 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1458 if (inode->i_dquot[cnt] == NODQUOT) 1665 if (!inode->i_dquot[cnt])
1459 continue; 1666 continue;
1460 warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number); 1667 warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
1461 dquot_decr_inodes(inode->i_dquot[cnt], number); 1668 dquot_decr_inodes(inode->i_dquot[cnt], number);
@@ -1469,6 +1676,20 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
1469 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1676 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1470 return QUOTA_OK; 1677 return QUOTA_OK;
1471} 1678}
1679EXPORT_SYMBOL(dquot_free_inode);
1680
1681/*
1682 * call back function, get reserved quota space from underlying fs
1683 */
1684qsize_t dquot_get_reserved_space(struct inode *inode)
1685{
1686 qsize_t reserved_space = 0;
1687
1688 if (sb_any_quota_active(inode->i_sb) &&
1689 inode->i_sb->dq_op->get_reserved_space)
1690 reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
1691 return reserved_space;
1692}
1472 1693
1473/* 1694/*
1474 * Transfer the number of inode and blocks from one diskquota to an other. 1695 * Transfer the number of inode and blocks from one diskquota to an other.
@@ -1478,7 +1699,8 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
1478 */ 1699 */
1479int dquot_transfer(struct inode *inode, struct iattr *iattr) 1700int dquot_transfer(struct inode *inode, struct iattr *iattr)
1480{ 1701{
1481 qsize_t space; 1702 qsize_t space, cur_space;
1703 qsize_t rsv_space = 0;
1482 struct dquot *transfer_from[MAXQUOTAS]; 1704 struct dquot *transfer_from[MAXQUOTAS];
1483 struct dquot *transfer_to[MAXQUOTAS]; 1705 struct dquot *transfer_to[MAXQUOTAS];
1484 int cnt, ret = QUOTA_OK; 1706 int cnt, ret = QUOTA_OK;
@@ -1493,22 +1715,16 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1493 return QUOTA_OK; 1715 return QUOTA_OK;
1494 /* Initialize the arrays */ 1716 /* Initialize the arrays */
1495 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1717 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1496 transfer_from[cnt] = NODQUOT; 1718 transfer_from[cnt] = NULL;
1497 transfer_to[cnt] = NODQUOT; 1719 transfer_to[cnt] = NULL;
1498 warntype_to[cnt] = QUOTA_NL_NOWARN; 1720 warntype_to[cnt] = QUOTA_NL_NOWARN;
1499 switch (cnt) {
1500 case USRQUOTA:
1501 if (!chuid)
1502 continue;
1503 transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt);
1504 break;
1505 case GRPQUOTA:
1506 if (!chgid)
1507 continue;
1508 transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt);
1509 break;
1510 }
1511 } 1721 }
1722 if (chuid)
1723 transfer_to[USRQUOTA] = dqget(inode->i_sb, iattr->ia_uid,
1724 USRQUOTA);
1725 if (chgid)
1726 transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid,
1727 GRPQUOTA);
1512 1728
1513 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1729 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1514 /* Now recheck reliably when holding dqptr_sem */ 1730 /* Now recheck reliably when holding dqptr_sem */
@@ -1517,10 +1733,12 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1517 goto put_all; 1733 goto put_all;
1518 } 1734 }
1519 spin_lock(&dq_data_lock); 1735 spin_lock(&dq_data_lock);
1520 space = inode_get_bytes(inode); 1736 cur_space = inode_get_bytes(inode);
1737 rsv_space = dquot_get_reserved_space(inode);
1738 space = cur_space + rsv_space;
1521 /* Build the transfer_from list and check the limits */ 1739 /* Build the transfer_from list and check the limits */
1522 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1740 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1523 if (transfer_to[cnt] == NODQUOT) 1741 if (!transfer_to[cnt])
1524 continue; 1742 continue;
1525 transfer_from[cnt] = inode->i_dquot[cnt]; 1743 transfer_from[cnt] = inode->i_dquot[cnt];
1526 if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) == 1744 if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) ==
@@ -1536,7 +1754,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1536 /* 1754 /*
1537 * Skip changes for same uid or gid or for turned off quota-type. 1755 * Skip changes for same uid or gid or for turned off quota-type.
1538 */ 1756 */
1539 if (transfer_to[cnt] == NODQUOT) 1757 if (!transfer_to[cnt])
1540 continue; 1758 continue;
1541 1759
1542 /* Due to IO error we might not have transfer_from[] structure */ 1760 /* Due to IO error we might not have transfer_from[] structure */
@@ -1546,11 +1764,14 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1546 warntype_from_space[cnt] = 1764 warntype_from_space[cnt] =
1547 info_bdq_free(transfer_from[cnt], space); 1765 info_bdq_free(transfer_from[cnt], space);
1548 dquot_decr_inodes(transfer_from[cnt], 1); 1766 dquot_decr_inodes(transfer_from[cnt], 1);
1549 dquot_decr_space(transfer_from[cnt], space); 1767 dquot_decr_space(transfer_from[cnt], cur_space);
1768 dquot_free_reserved_space(transfer_from[cnt],
1769 rsv_space);
1550 } 1770 }
1551 1771
1552 dquot_incr_inodes(transfer_to[cnt], 1); 1772 dquot_incr_inodes(transfer_to[cnt], 1);
1553 dquot_incr_space(transfer_to[cnt], space); 1773 dquot_incr_space(transfer_to[cnt], cur_space);
1774 dquot_resv_space(transfer_to[cnt], rsv_space);
1554 1775
1555 inode->i_dquot[cnt] = transfer_to[cnt]; 1776 inode->i_dquot[cnt] = transfer_to[cnt];
1556 } 1777 }
@@ -1564,7 +1785,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1564 if (transfer_to[cnt]) { 1785 if (transfer_to[cnt]) {
1565 mark_dquot_dirty(transfer_to[cnt]); 1786 mark_dquot_dirty(transfer_to[cnt]);
1566 /* The reference we got is transferred to the inode */ 1787 /* The reference we got is transferred to the inode */
1567 transfer_to[cnt] = NODQUOT; 1788 transfer_to[cnt] = NULL;
1568 } 1789 }
1569 } 1790 }
1570warn_put_all: 1791warn_put_all:
@@ -1582,10 +1803,11 @@ over_quota:
1582 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1803 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1583 /* Clear dquot pointers we don't want to dqput() */ 1804 /* Clear dquot pointers we don't want to dqput() */
1584 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1805 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1585 transfer_from[cnt] = NODQUOT; 1806 transfer_from[cnt] = NULL;
1586 ret = NO_QUOTA; 1807 ret = NO_QUOTA;
1587 goto warn_put_all; 1808 goto warn_put_all;
1588} 1809}
1810EXPORT_SYMBOL(dquot_transfer);
1589 1811
1590/* Wrapper for transferring ownership of an inode */ 1812/* Wrapper for transferring ownership of an inode */
1591int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) 1813int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
@@ -1597,7 +1819,7 @@ int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
1597 } 1819 }
1598 return 0; 1820 return 0;
1599} 1821}
1600 1822EXPORT_SYMBOL(vfs_dq_transfer);
1601 1823
1602/* 1824/*
1603 * Write info of quota file to disk 1825 * Write info of quota file to disk
@@ -1612,6 +1834,7 @@ int dquot_commit_info(struct super_block *sb, int type)
1612 mutex_unlock(&dqopt->dqio_mutex); 1834 mutex_unlock(&dqopt->dqio_mutex);
1613 return ret; 1835 return ret;
1614} 1836}
1837EXPORT_SYMBOL(dquot_commit_info);
1615 1838
1616/* 1839/*
1617 * Definitions of diskquota operations. 1840 * Definitions of diskquota operations.
@@ -1697,8 +1920,8 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
1697 drop_dquot_ref(sb, cnt); 1920 drop_dquot_ref(sb, cnt);
1698 invalidate_dquots(sb, cnt); 1921 invalidate_dquots(sb, cnt);
1699 /* 1922 /*
1700 * Now all dquots should be invalidated, all writes done so we should be only 1923 * Now all dquots should be invalidated, all writes done so we
1701 * users of the info. No locks needed. 1924 * should be only users of the info. No locks needed.
1702 */ 1925 */
1703 if (info_dirty(&dqopt->info[cnt])) 1926 if (info_dirty(&dqopt->info[cnt]))
1704 sb->dq_op->write_info(sb, cnt); 1927 sb->dq_op->write_info(sb, cnt);
@@ -1736,10 +1959,12 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
1736 /* If quota was reenabled in the meantime, we have 1959 /* If quota was reenabled in the meantime, we have
1737 * nothing to do */ 1960 * nothing to do */
1738 if (!sb_has_quota_loaded(sb, cnt)) { 1961 if (!sb_has_quota_loaded(sb, cnt)) {
1739 mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA); 1962 mutex_lock_nested(&toputinode[cnt]->i_mutex,
1963 I_MUTEX_QUOTA);
1740 toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | 1964 toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
1741 S_NOATIME | S_NOQUOTA); 1965 S_NOATIME | S_NOQUOTA);
1742 truncate_inode_pages(&toputinode[cnt]->i_data, 0); 1966 truncate_inode_pages(&toputinode[cnt]->i_data,
1967 0);
1743 mutex_unlock(&toputinode[cnt]->i_mutex); 1968 mutex_unlock(&toputinode[cnt]->i_mutex);
1744 mark_inode_dirty(toputinode[cnt]); 1969 mark_inode_dirty(toputinode[cnt]);
1745 } 1970 }
@@ -1764,13 +1989,14 @@ put_inodes:
1764 } 1989 }
1765 return ret; 1990 return ret;
1766} 1991}
1992EXPORT_SYMBOL(vfs_quota_disable);
1767 1993
1768int vfs_quota_off(struct super_block *sb, int type, int remount) 1994int vfs_quota_off(struct super_block *sb, int type, int remount)
1769{ 1995{
1770 return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED : 1996 return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED :
1771 (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED)); 1997 (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED));
1772} 1998}
1773 1999EXPORT_SYMBOL(vfs_quota_off);
1774/* 2000/*
1775 * Turn quotas on on a device 2001 * Turn quotas on on a device
1776 */ 2002 */
@@ -1828,7 +2054,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
1828 * possible) Also nobody should write to the file - we use 2054 * possible) Also nobody should write to the file - we use
1829 * special IO operations which ignore the immutable bit. */ 2055 * special IO operations which ignore the immutable bit. */
1830 down_write(&dqopt->dqptr_sem); 2056 down_write(&dqopt->dqptr_sem);
1831 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA); 2057 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
2058 S_NOQUOTA);
1832 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; 2059 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
1833 up_write(&dqopt->dqptr_sem); 2060 up_write(&dqopt->dqptr_sem);
1834 sb->dq_op->drop(inode); 2061 sb->dq_op->drop(inode);
@@ -1847,7 +2074,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
1847 dqopt->info[type].dqi_fmt_id = format_id; 2074 dqopt->info[type].dqi_fmt_id = format_id;
1848 INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); 2075 INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
1849 mutex_lock(&dqopt->dqio_mutex); 2076 mutex_lock(&dqopt->dqio_mutex);
1850 if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { 2077 error = dqopt->ops[type]->read_file_info(sb, type);
2078 if (error < 0) {
1851 mutex_unlock(&dqopt->dqio_mutex); 2079 mutex_unlock(&dqopt->dqio_mutex);
1852 goto out_file_init; 2080 goto out_file_init;
1853 } 2081 }
@@ -1927,6 +2155,7 @@ int vfs_quota_on_path(struct super_block *sb, int type, int format_id,
1927 DQUOT_LIMITS_ENABLED); 2155 DQUOT_LIMITS_ENABLED);
1928 return error; 2156 return error;
1929} 2157}
2158EXPORT_SYMBOL(vfs_quota_on_path);
1930 2159
1931int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, 2160int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
1932 int remount) 2161 int remount)
@@ -1944,6 +2173,7 @@ int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
1944 } 2173 }
1945 return error; 2174 return error;
1946} 2175}
2176EXPORT_SYMBOL(vfs_quota_on);
1947 2177
1948/* 2178/*
1949 * More powerful function for turning on quotas allowing setting 2179 * More powerful function for turning on quotas allowing setting
@@ -1990,6 +2220,7 @@ out_lock:
1990load_quota: 2220load_quota:
1991 return vfs_load_quota_inode(inode, type, format_id, flags); 2221 return vfs_load_quota_inode(inode, type, format_id, flags);
1992} 2222}
2223EXPORT_SYMBOL(vfs_quota_enable);
1993 2224
1994/* 2225/*
1995 * This function is used when filesystem needs to initialize quotas 2226 * This function is used when filesystem needs to initialize quotas
@@ -2019,6 +2250,7 @@ out:
2019 dput(dentry); 2250 dput(dentry);
2020 return error; 2251 return error;
2021} 2252}
2253EXPORT_SYMBOL(vfs_quota_on_mount);
2022 2254
2023/* Wrapper to turn on quotas when remounting rw */ 2255/* Wrapper to turn on quotas when remounting rw */
2024int vfs_dq_quota_on_remount(struct super_block *sb) 2256int vfs_dq_quota_on_remount(struct super_block *sb)
@@ -2035,6 +2267,7 @@ int vfs_dq_quota_on_remount(struct super_block *sb)
2035 } 2267 }
2036 return ret; 2268 return ret;
2037} 2269}
2270EXPORT_SYMBOL(vfs_dq_quota_on_remount);
2038 2271
2039static inline qsize_t qbtos(qsize_t blocks) 2272static inline qsize_t qbtos(qsize_t blocks)
2040{ 2273{
@@ -2054,7 +2287,7 @@ static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
2054 spin_lock(&dq_data_lock); 2287 spin_lock(&dq_data_lock);
2055 di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit); 2288 di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit);
2056 di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit); 2289 di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit);
2057 di->dqb_curspace = dm->dqb_curspace; 2290 di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace;
2058 di->dqb_ihardlimit = dm->dqb_ihardlimit; 2291 di->dqb_ihardlimit = dm->dqb_ihardlimit;
2059 di->dqb_isoftlimit = dm->dqb_isoftlimit; 2292 di->dqb_isoftlimit = dm->dqb_isoftlimit;
2060 di->dqb_curinodes = dm->dqb_curinodes; 2293 di->dqb_curinodes = dm->dqb_curinodes;
@@ -2064,18 +2297,20 @@ static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
2064 spin_unlock(&dq_data_lock); 2297 spin_unlock(&dq_data_lock);
2065} 2298}
2066 2299
2067int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) 2300int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
2301 struct if_dqblk *di)
2068{ 2302{
2069 struct dquot *dquot; 2303 struct dquot *dquot;
2070 2304
2071 dquot = dqget(sb, id, type); 2305 dquot = dqget(sb, id, type);
2072 if (dquot == NODQUOT) 2306 if (!dquot)
2073 return -ESRCH; 2307 return -ESRCH;
2074 do_get_dqblk(dquot, di); 2308 do_get_dqblk(dquot, di);
2075 dqput(dquot); 2309 dqput(dquot);
2076 2310
2077 return 0; 2311 return 0;
2078} 2312}
2313EXPORT_SYMBOL(vfs_get_dqblk);
2079 2314
2080/* Generic routine for setting common part of quota structure */ 2315/* Generic routine for setting common part of quota structure */
2081static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) 2316static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
@@ -2094,7 +2329,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
2094 2329
2095 spin_lock(&dq_data_lock); 2330 spin_lock(&dq_data_lock);
2096 if (di->dqb_valid & QIF_SPACE) { 2331 if (di->dqb_valid & QIF_SPACE) {
2097 dm->dqb_curspace = di->dqb_curspace; 2332 dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
2098 check_blim = 1; 2333 check_blim = 1;
2099 __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); 2334 __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
2100 } 2335 }
@@ -2127,22 +2362,25 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
2127 } 2362 }
2128 2363
2129 if (check_blim) { 2364 if (check_blim) {
2130 if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) { 2365 if (!dm->dqb_bsoftlimit ||
2366 dm->dqb_curspace < dm->dqb_bsoftlimit) {
2131 dm->dqb_btime = 0; 2367 dm->dqb_btime = 0;
2132 clear_bit(DQ_BLKS_B, &dquot->dq_flags); 2368 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
2133 } 2369 } else if (!(di->dqb_valid & QIF_BTIME))
2134 else if (!(di->dqb_valid & QIF_BTIME)) /* Set grace only if user hasn't provided his own... */ 2370 /* Set grace only if user hasn't provided his own... */
2135 dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; 2371 dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
2136 } 2372 }
2137 if (check_ilim) { 2373 if (check_ilim) {
2138 if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) { 2374 if (!dm->dqb_isoftlimit ||
2375 dm->dqb_curinodes < dm->dqb_isoftlimit) {
2139 dm->dqb_itime = 0; 2376 dm->dqb_itime = 0;
2140 clear_bit(DQ_INODES_B, &dquot->dq_flags); 2377 clear_bit(DQ_INODES_B, &dquot->dq_flags);
2141 } 2378 } else if (!(di->dqb_valid & QIF_ITIME))
2142 else if (!(di->dqb_valid & QIF_ITIME)) /* Set grace only if user hasn't provided his own... */ 2379 /* Set grace only if user hasn't provided his own... */
2143 dm->dqb_itime = get_seconds() + dqi->dqi_igrace; 2380 dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
2144 } 2381 }
2145 if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit) 2382 if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
2383 dm->dqb_isoftlimit)
2146 clear_bit(DQ_FAKE_B, &dquot->dq_flags); 2384 clear_bit(DQ_FAKE_B, &dquot->dq_flags);
2147 else 2385 else
2148 set_bit(DQ_FAKE_B, &dquot->dq_flags); 2386 set_bit(DQ_FAKE_B, &dquot->dq_flags);
@@ -2152,7 +2390,8 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
2152 return 0; 2390 return 0;
2153} 2391}
2154 2392
2155int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) 2393int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
2394 struct if_dqblk *di)
2156{ 2395{
2157 struct dquot *dquot; 2396 struct dquot *dquot;
2158 int rc; 2397 int rc;
@@ -2167,6 +2406,7 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
2167out: 2406out:
2168 return rc; 2407 return rc;
2169} 2408}
2409EXPORT_SYMBOL(vfs_set_dqblk);
2170 2410
2171/* Generic routine for getting common part of quota file information */ 2411/* Generic routine for getting common part of quota file information */
2172int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) 2412int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
@@ -2188,6 +2428,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2188 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); 2428 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2189 return 0; 2429 return 0;
2190} 2430}
2431EXPORT_SYMBOL(vfs_get_dqinfo);
2191 2432
2192/* Generic routine for setting common part of quota file information */ 2433/* Generic routine for setting common part of quota file information */
2193int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) 2434int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
@@ -2207,7 +2448,8 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2207 if (ii->dqi_valid & IIF_IGRACE) 2448 if (ii->dqi_valid & IIF_IGRACE)
2208 mi->dqi_igrace = ii->dqi_igrace; 2449 mi->dqi_igrace = ii->dqi_igrace;
2209 if (ii->dqi_valid & IIF_FLAGS) 2450 if (ii->dqi_valid & IIF_FLAGS)
2210 mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK); 2451 mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) |
2452 (ii->dqi_flags & DQF_MASK);
2211 spin_unlock(&dq_data_lock); 2453 spin_unlock(&dq_data_lock);
2212 mark_info_dirty(sb, type); 2454 mark_info_dirty(sb, type);
2213 /* Force write to disk */ 2455 /* Force write to disk */
@@ -2216,6 +2458,7 @@ out:
2216 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); 2458 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2217 return err; 2459 return err;
2218} 2460}
2461EXPORT_SYMBOL(vfs_set_dqinfo);
2219 2462
2220struct quotactl_ops vfs_quotactl_ops = { 2463struct quotactl_ops vfs_quotactl_ops = {
2221 .quota_on = vfs_quota_on, 2464 .quota_on = vfs_quota_on,
@@ -2365,43 +2608,10 @@ static int __init dquot_init(void)
2365 2608
2366#ifdef CONFIG_QUOTA_NETLINK_INTERFACE 2609#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
2367 if (genl_register_family(&quota_genl_family) != 0) 2610 if (genl_register_family(&quota_genl_family) != 0)
2368 printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n"); 2611 printk(KERN_ERR
2612 "VFS: Failed to create quota netlink interface.\n");
2369#endif 2613#endif
2370 2614
2371 return 0; 2615 return 0;
2372} 2616}
2373module_init(dquot_init); 2617module_init(dquot_init);
2374
2375EXPORT_SYMBOL(register_quota_format);
2376EXPORT_SYMBOL(unregister_quota_format);
2377EXPORT_SYMBOL(dqstats);
2378EXPORT_SYMBOL(dq_data_lock);
2379EXPORT_SYMBOL(vfs_quota_enable);
2380EXPORT_SYMBOL(vfs_quota_on);
2381EXPORT_SYMBOL(vfs_quota_on_path);
2382EXPORT_SYMBOL(vfs_quota_on_mount);
2383EXPORT_SYMBOL(vfs_quota_disable);
2384EXPORT_SYMBOL(vfs_quota_off);
2385EXPORT_SYMBOL(dquot_scan_active);
2386EXPORT_SYMBOL(vfs_quota_sync);
2387EXPORT_SYMBOL(vfs_get_dqinfo);
2388EXPORT_SYMBOL(vfs_set_dqinfo);
2389EXPORT_SYMBOL(vfs_get_dqblk);
2390EXPORT_SYMBOL(vfs_set_dqblk);
2391EXPORT_SYMBOL(dquot_commit);
2392EXPORT_SYMBOL(dquot_commit_info);
2393EXPORT_SYMBOL(dquot_acquire);
2394EXPORT_SYMBOL(dquot_release);
2395EXPORT_SYMBOL(dquot_mark_dquot_dirty);
2396EXPORT_SYMBOL(dquot_initialize);
2397EXPORT_SYMBOL(dquot_drop);
2398EXPORT_SYMBOL(vfs_dq_drop);
2399EXPORT_SYMBOL(dqget);
2400EXPORT_SYMBOL(dqput);
2401EXPORT_SYMBOL(dquot_alloc_space);
2402EXPORT_SYMBOL(dquot_alloc_inode);
2403EXPORT_SYMBOL(dquot_free_space);
2404EXPORT_SYMBOL(dquot_free_inode);
2405EXPORT_SYMBOL(dquot_transfer);
2406EXPORT_SYMBOL(vfs_dq_transfer);
2407EXPORT_SYMBOL(vfs_dq_quota_on_remount);
diff --git a/fs/quota.c b/fs/quota/quota.c
index d76ada914f98..b7f5a468f076 100644
--- a/fs/quota.c
+++ b/fs/quota/quota.c
@@ -20,7 +20,8 @@
20#include <linux/types.h> 20#include <linux/types.h>
21 21
22/* Check validity of generic quotactl commands */ 22/* Check validity of generic quotactl commands */
23static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) 23static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
24 qid_t id)
24{ 25{
25 if (type >= MAXQUOTAS) 26 if (type >= MAXQUOTAS)
26 return -EINVAL; 27 return -EINVAL;
@@ -72,7 +73,8 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid
72 case Q_SETINFO: 73 case Q_SETINFO:
73 case Q_SETQUOTA: 74 case Q_SETQUOTA:
74 case Q_GETQUOTA: 75 case Q_GETQUOTA:
75 /* This is just informative test so we are satisfied without a lock */ 76 /* This is just an informative test so we are satisfied
77 * without the lock */
76 if (!sb_has_quota_active(sb, type)) 78 if (!sb_has_quota_active(sb, type))
77 return -ESRCH; 79 return -ESRCH;
78 } 80 }
@@ -92,7 +94,8 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid
92} 94}
93 95
94/* Check validity of XFS Quota Manager commands */ 96/* Check validity of XFS Quota Manager commands */
95static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) 97static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd,
98 qid_t id)
96{ 99{
97 if (type >= XQM_MAXQUOTAS) 100 if (type >= XQM_MAXQUOTAS)
98 return -EINVAL; 101 return -EINVAL;
@@ -142,7 +145,8 @@ static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t i
142 return 0; 145 return 0;
143} 146}
144 147
145static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) 148static int check_quotactl_valid(struct super_block *sb, int type, int cmd,
149 qid_t id)
146{ 150{
147 int error; 151 int error;
148 152
@@ -180,7 +184,8 @@ static void quota_sync_sb(struct super_block *sb, int type)
180 continue; 184 continue;
181 if (!sb_has_quota_active(sb, cnt)) 185 if (!sb_has_quota_active(sb, cnt))
182 continue; 186 continue;
183 mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA); 187 mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
188 I_MUTEX_QUOTA);
184 truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); 189 truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
185 mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex); 190 mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
186 } 191 }
@@ -200,14 +205,15 @@ void sync_dquots(struct super_block *sb, int type)
200 spin_lock(&sb_lock); 205 spin_lock(&sb_lock);
201restart: 206restart:
202 list_for_each_entry(sb, &super_blocks, s_list) { 207 list_for_each_entry(sb, &super_blocks, s_list) {
203 /* This test just improves performance so it needn't be reliable... */ 208 /* This test just improves performance so it needn't be
209 * reliable... */
204 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 210 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
205 if (type != -1 && type != cnt) 211 if (type != -1 && type != cnt)
206 continue; 212 continue;
207 if (!sb_has_quota_active(sb, cnt)) 213 if (!sb_has_quota_active(sb, cnt))
208 continue; 214 continue;
209 if (!info_dirty(&sb_dqopt(sb)->info[cnt]) && 215 if (!info_dirty(&sb_dqopt(sb)->info[cnt]) &&
210 list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list)) 216 list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list))
211 continue; 217 continue;
212 break; 218 break;
213 } 219 }
@@ -227,7 +233,8 @@ restart:
227} 233}
228 234
229/* Copy parameters and call proper function */ 235/* Copy parameters and call proper function */
230static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr) 236static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
237 void __user *addr)
231{ 238{
232 int ret; 239 int ret;
233 240
@@ -235,7 +242,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
235 case Q_QUOTAON: { 242 case Q_QUOTAON: {
236 char *pathname; 243 char *pathname;
237 244
238 if (IS_ERR(pathname = getname(addr))) 245 pathname = getname(addr);
246 if (IS_ERR(pathname))
239 return PTR_ERR(pathname); 247 return PTR_ERR(pathname);
240 ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0); 248 ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0);
241 putname(pathname); 249 putname(pathname);
@@ -261,7 +269,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
261 case Q_GETINFO: { 269 case Q_GETINFO: {
262 struct if_dqinfo info; 270 struct if_dqinfo info;
263 271
264 if ((ret = sb->s_qcop->get_info(sb, type, &info))) 272 ret = sb->s_qcop->get_info(sb, type, &info);
273 if (ret)
265 return ret; 274 return ret;
266 if (copy_to_user(addr, &info, sizeof(info))) 275 if (copy_to_user(addr, &info, sizeof(info)))
267 return -EFAULT; 276 return -EFAULT;
@@ -277,7 +286,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
277 case Q_GETQUOTA: { 286 case Q_GETQUOTA: {
278 struct if_dqblk idq; 287 struct if_dqblk idq;
279 288
280 if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq))) 289 ret = sb->s_qcop->get_dqblk(sb, type, id, &idq);
290 if (ret)
281 return ret; 291 return ret;
282 if (copy_to_user(addr, &idq, sizeof(idq))) 292 if (copy_to_user(addr, &idq, sizeof(idq)))
283 return -EFAULT; 293 return -EFAULT;
@@ -322,7 +332,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
322 case Q_XGETQUOTA: { 332 case Q_XGETQUOTA: {
323 struct fs_disk_quota fdq; 333 struct fs_disk_quota fdq;
324 334
325 if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq))) 335 ret = sb->s_qcop->get_xquota(sb, type, id, &fdq);
336 if (ret)
326 return ret; 337 return ret;
327 if (copy_to_user(addr, &fdq, sizeof(fdq))) 338 if (copy_to_user(addr, &fdq, sizeof(fdq)))
328 return -EFAULT; 339 return -EFAULT;
@@ -341,7 +352,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
341 * look up a superblock on which quota ops will be performed 352 * look up a superblock on which quota ops will be performed
342 * - use the name of a block device to find the superblock thereon 353 * - use the name of a block device to find the superblock thereon
343 */ 354 */
344static inline struct super_block *quotactl_block(const char __user *special) 355static struct super_block *quotactl_block(const char __user *special)
345{ 356{
346#ifdef CONFIG_BLOCK 357#ifdef CONFIG_BLOCK
347 struct block_device *bdev; 358 struct block_device *bdev;
diff --git a/fs/quota_tree.c b/fs/quota/quota_tree.c
index 953404c95b17..f81f4bcfb178 100644
--- a/fs/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -22,8 +22,6 @@ MODULE_LICENSE("GPL");
22 22
23#define __QUOTA_QT_PARANOIA 23#define __QUOTA_QT_PARANOIA
24 24
25typedef char *dqbuf_t;
26
27static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) 25static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
28{ 26{
29 unsigned int epb = info->dqi_usable_bs >> 2; 27 unsigned int epb = info->dqi_usable_bs >> 2;
@@ -35,46 +33,42 @@ static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
35} 33}
36 34
37/* Number of entries in one blocks */ 35/* Number of entries in one blocks */
38static inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info) 36static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
39{ 37{
40 return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader)) 38 return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
41 / info->dqi_entry_size; 39 / info->dqi_entry_size;
42} 40}
43 41
44static dqbuf_t getdqbuf(size_t size) 42static char *getdqbuf(size_t size)
45{ 43{
46 dqbuf_t buf = kmalloc(size, GFP_NOFS); 44 char *buf = kmalloc(size, GFP_NOFS);
47 if (!buf) 45 if (!buf)
48 printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); 46 printk(KERN_WARNING
47 "VFS: Not enough memory for quota buffers.\n");
49 return buf; 48 return buf;
50} 49}
51 50
52static inline void freedqbuf(dqbuf_t buf) 51static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
53{
54 kfree(buf);
55}
56
57static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf)
58{ 52{
59 struct super_block *sb = info->dqi_sb; 53 struct super_block *sb = info->dqi_sb;
60 54
61 memset(buf, 0, info->dqi_usable_bs); 55 memset(buf, 0, info->dqi_usable_bs);
62 return sb->s_op->quota_read(sb, info->dqi_type, (char *)buf, 56 return sb->s_op->quota_read(sb, info->dqi_type, buf,
63 info->dqi_usable_bs, blk << info->dqi_blocksize_bits); 57 info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
64} 58}
65 59
66static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) 60static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
67{ 61{
68 struct super_block *sb = info->dqi_sb; 62 struct super_block *sb = info->dqi_sb;
69 63
70 return sb->s_op->quota_write(sb, info->dqi_type, (char *)buf, 64 return sb->s_op->quota_write(sb, info->dqi_type, buf,
71 info->dqi_usable_bs, blk << info->dqi_blocksize_bits); 65 info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
72} 66}
73 67
74/* Remove empty block from list and return it */ 68/* Remove empty block from list and return it */
75static int get_free_dqblk(struct qtree_mem_dqinfo *info) 69static int get_free_dqblk(struct qtree_mem_dqinfo *info)
76{ 70{
77 dqbuf_t buf = getdqbuf(info->dqi_usable_bs); 71 char *buf = getdqbuf(info->dqi_usable_bs);
78 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; 72 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
79 int ret, blk; 73 int ret, blk;
80 74
@@ -98,12 +92,12 @@ static int get_free_dqblk(struct qtree_mem_dqinfo *info)
98 mark_info_dirty(info->dqi_sb, info->dqi_type); 92 mark_info_dirty(info->dqi_sb, info->dqi_type);
99 ret = blk; 93 ret = blk;
100out_buf: 94out_buf:
101 freedqbuf(buf); 95 kfree(buf);
102 return ret; 96 return ret;
103} 97}
104 98
105/* Insert empty block to the list */ 99/* Insert empty block to the list */
106static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) 100static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
107{ 101{
108 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; 102 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
109 int err; 103 int err;
@@ -120,9 +114,10 @@ static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
120} 114}
121 115
122/* Remove given block from the list of blocks with free entries */ 116/* Remove given block from the list of blocks with free entries */
123static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) 117static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
118 uint blk)
124{ 119{
125 dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); 120 char *tmpbuf = getdqbuf(info->dqi_usable_bs);
126 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; 121 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
127 uint nextblk = le32_to_cpu(dh->dqdh_next_free); 122 uint nextblk = le32_to_cpu(dh->dqdh_next_free);
128 uint prevblk = le32_to_cpu(dh->dqdh_prev_free); 123 uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
@@ -153,21 +148,24 @@ static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint
153 info->dqi_free_entry = nextblk; 148 info->dqi_free_entry = nextblk;
154 mark_info_dirty(info->dqi_sb, info->dqi_type); 149 mark_info_dirty(info->dqi_sb, info->dqi_type);
155 } 150 }
156 freedqbuf(tmpbuf); 151 kfree(tmpbuf);
157 dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); 152 dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
158 /* No matter whether write succeeds block is out of list */ 153 /* No matter whether write succeeds block is out of list */
159 if (write_blk(info, blk, buf) < 0) 154 if (write_blk(info, blk, buf) < 0)
160 printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk); 155 printk(KERN_ERR
156 "VFS: Can't write block (%u) with free entries.\n",
157 blk);
161 return 0; 158 return 0;
162out_buf: 159out_buf:
163 freedqbuf(tmpbuf); 160 kfree(tmpbuf);
164 return err; 161 return err;
165} 162}
166 163
167/* Insert given block to the beginning of list with free entries */ 164/* Insert given block to the beginning of list with free entries */
168static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) 165static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
166 uint blk)
169{ 167{
170 dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); 168 char *tmpbuf = getdqbuf(info->dqi_usable_bs);
171 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; 169 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
172 int err; 170 int err;
173 171
@@ -188,12 +186,12 @@ static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint
188 if (err < 0) 186 if (err < 0)
189 goto out_buf; 187 goto out_buf;
190 } 188 }
191 freedqbuf(tmpbuf); 189 kfree(tmpbuf);
192 info->dqi_free_entry = blk; 190 info->dqi_free_entry = blk;
193 mark_info_dirty(info->dqi_sb, info->dqi_type); 191 mark_info_dirty(info->dqi_sb, info->dqi_type);
194 return 0; 192 return 0;
195out_buf: 193out_buf:
196 freedqbuf(tmpbuf); 194 kfree(tmpbuf);
197 return err; 195 return err;
198} 196}
199 197
@@ -215,7 +213,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
215{ 213{
216 uint blk, i; 214 uint blk, i;
217 struct qt_disk_dqdbheader *dh; 215 struct qt_disk_dqdbheader *dh;
218 dqbuf_t buf = getdqbuf(info->dqi_usable_bs); 216 char *buf = getdqbuf(info->dqi_usable_bs);
219 char *ddquot; 217 char *ddquot;
220 218
221 *err = 0; 219 *err = 0;
@@ -233,11 +231,12 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
233 blk = get_free_dqblk(info); 231 blk = get_free_dqblk(info);
234 if ((int)blk < 0) { 232 if ((int)blk < 0) {
235 *err = blk; 233 *err = blk;
236 freedqbuf(buf); 234 kfree(buf);
237 return 0; 235 return 0;
238 } 236 }
239 memset(buf, 0, info->dqi_usable_bs); 237 memset(buf, 0, info->dqi_usable_bs);
240 /* This is enough as block is already zeroed and entry list is empty... */ 238 /* This is enough as the block is already zeroed and the entry
239 * list is empty... */
241 info->dqi_free_entry = blk; 240 info->dqi_free_entry = blk;
242 mark_info_dirty(dquot->dq_sb, dquot->dq_type); 241 mark_info_dirty(dquot->dq_sb, dquot->dq_type);
243 } 242 }
@@ -253,9 +252,12 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
253 } 252 }
254 le16_add_cpu(&dh->dqdh_entries, 1); 253 le16_add_cpu(&dh->dqdh_entries, 1);
255 /* Find free structure in block */ 254 /* Find free structure in block */
256 for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); 255 ddquot = buf + sizeof(struct qt_disk_dqdbheader);
257 i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot); 256 for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
258 i++, ddquot += info->dqi_entry_size); 257 if (qtree_entry_unused(info, ddquot))
258 break;
259 ddquot += info->dqi_entry_size;
260 }
259#ifdef __QUOTA_QT_PARANOIA 261#ifdef __QUOTA_QT_PARANOIA
260 if (i == qtree_dqstr_in_blk(info)) { 262 if (i == qtree_dqstr_in_blk(info)) {
261 printk(KERN_ERR "VFS: find_free_dqentry(): Data block full " 263 printk(KERN_ERR "VFS: find_free_dqentry(): Data block full "
@@ -273,10 +275,10 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
273 dquot->dq_off = (blk << info->dqi_blocksize_bits) + 275 dquot->dq_off = (blk << info->dqi_blocksize_bits) +
274 sizeof(struct qt_disk_dqdbheader) + 276 sizeof(struct qt_disk_dqdbheader) +
275 i * info->dqi_entry_size; 277 i * info->dqi_entry_size;
276 freedqbuf(buf); 278 kfree(buf);
277 return blk; 279 return blk;
278out_buf: 280out_buf:
279 freedqbuf(buf); 281 kfree(buf);
280 return 0; 282 return 0;
281} 283}
282 284
@@ -284,7 +286,7 @@ out_buf:
284static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, 286static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
285 uint *treeblk, int depth) 287 uint *treeblk, int depth)
286{ 288{
287 dqbuf_t buf = getdqbuf(info->dqi_usable_bs); 289 char *buf = getdqbuf(info->dqi_usable_bs);
288 int ret = 0, newson = 0, newact = 0; 290 int ret = 0, newson = 0, newact = 0;
289 __le32 *ref; 291 __le32 *ref;
290 uint newblk; 292 uint newblk;
@@ -333,7 +335,7 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
333 put_free_dqblk(info, buf, *treeblk); 335 put_free_dqblk(info, buf, *treeblk);
334 } 336 }
335out_buf: 337out_buf:
336 freedqbuf(buf); 338 kfree(buf);
337 return ret; 339 return ret;
338} 340}
339 341
@@ -346,14 +348,15 @@ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
346} 348}
347 349
348/* 350/*
349 * We don't have to be afraid of deadlocks as we never have quotas on quota files... 351 * We don't have to be afraid of deadlocks as we never have quotas on quota
352 * files...
350 */ 353 */
351int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) 354int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
352{ 355{
353 int type = dquot->dq_type; 356 int type = dquot->dq_type;
354 struct super_block *sb = dquot->dq_sb; 357 struct super_block *sb = dquot->dq_sb;
355 ssize_t ret; 358 ssize_t ret;
356 dqbuf_t ddquot = getdqbuf(info->dqi_entry_size); 359 char *ddquot = getdqbuf(info->dqi_entry_size);
357 360
358 if (!ddquot) 361 if (!ddquot)
359 return -ENOMEM; 362 return -ENOMEM;
@@ -364,15 +367,15 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
364 if (ret < 0) { 367 if (ret < 0) {
365 printk(KERN_ERR "VFS: Error %zd occurred while " 368 printk(KERN_ERR "VFS: Error %zd occurred while "
366 "creating quota.\n", ret); 369 "creating quota.\n", ret);
367 freedqbuf(ddquot); 370 kfree(ddquot);
368 return ret; 371 return ret;
369 } 372 }
370 } 373 }
371 spin_lock(&dq_data_lock); 374 spin_lock(&dq_data_lock);
372 info->dqi_ops->mem2disk_dqblk(ddquot, dquot); 375 info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
373 spin_unlock(&dq_data_lock); 376 spin_unlock(&dq_data_lock);
374 ret = sb->s_op->quota_write(sb, type, (char *)ddquot, 377 ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
375 info->dqi_entry_size, dquot->dq_off); 378 dquot->dq_off);
376 if (ret != info->dqi_entry_size) { 379 if (ret != info->dqi_entry_size) {
377 printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", 380 printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
378 sb->s_id); 381 sb->s_id);
@@ -382,7 +385,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
382 ret = 0; 385 ret = 0;
383 } 386 }
384 dqstats.writes++; 387 dqstats.writes++;
385 freedqbuf(ddquot); 388 kfree(ddquot);
386 389
387 return ret; 390 return ret;
388} 391}
@@ -393,7 +396,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
393 uint blk) 396 uint blk)
394{ 397{
395 struct qt_disk_dqdbheader *dh; 398 struct qt_disk_dqdbheader *dh;
396 dqbuf_t buf = getdqbuf(info->dqi_usable_bs); 399 char *buf = getdqbuf(info->dqi_usable_bs);
397 int ret = 0; 400 int ret = 0;
398 401
399 if (!buf) 402 if (!buf)
@@ -444,7 +447,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
444 } 447 }
445 dquot->dq_off = 0; /* Quota is now unattached */ 448 dquot->dq_off = 0; /* Quota is now unattached */
446out_buf: 449out_buf:
447 freedqbuf(buf); 450 kfree(buf);
448 return ret; 451 return ret;
449} 452}
450 453
@@ -452,7 +455,7 @@ out_buf:
452static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, 455static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
453 uint *blk, int depth) 456 uint *blk, int depth)
454{ 457{
455 dqbuf_t buf = getdqbuf(info->dqi_usable_bs); 458 char *buf = getdqbuf(info->dqi_usable_bs);
456 int ret = 0; 459 int ret = 0;
457 uint newblk; 460 uint newblk;
458 __le32 *ref = (__le32 *)buf; 461 __le32 *ref = (__le32 *)buf;
@@ -475,9 +478,8 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
475 int i; 478 int i;
476 ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0); 479 ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
477 /* Block got empty? */ 480 /* Block got empty? */
478 for (i = 0; 481 for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
479 i < (info->dqi_usable_bs >> 2) && !ref[i]; 482 ;
480 i++);
481 /* Don't put the root block into the free block list */ 483 /* Don't put the root block into the free block list */
482 if (i == (info->dqi_usable_bs >> 2) 484 if (i == (info->dqi_usable_bs >> 2)
483 && *blk != QT_TREEOFF) { 485 && *blk != QT_TREEOFF) {
@@ -491,7 +493,7 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
491 } 493 }
492 } 494 }
493out_buf: 495out_buf:
494 freedqbuf(buf); 496 kfree(buf);
495 return ret; 497 return ret;
496} 498}
497 499
@@ -510,7 +512,7 @@ EXPORT_SYMBOL(qtree_delete_dquot);
510static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, 512static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
511 struct dquot *dquot, uint blk) 513 struct dquot *dquot, uint blk)
512{ 514{
513 dqbuf_t buf = getdqbuf(info->dqi_usable_bs); 515 char *buf = getdqbuf(info->dqi_usable_bs);
514 loff_t ret = 0; 516 loff_t ret = 0;
515 int i; 517 int i;
516 char *ddquot; 518 char *ddquot;
@@ -522,9 +524,12 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
522 printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); 524 printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
523 goto out_buf; 525 goto out_buf;
524 } 526 }
525 for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); 527 ddquot = buf + sizeof(struct qt_disk_dqdbheader);
526 i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot); 528 for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
527 i++, ddquot += info->dqi_entry_size); 529 if (info->dqi_ops->is_id(ddquot, dquot))
530 break;
531 ddquot += info->dqi_entry_size;
532 }
528 if (i == qtree_dqstr_in_blk(info)) { 533 if (i == qtree_dqstr_in_blk(info)) {
529 printk(KERN_ERR "VFS: Quota for id %u referenced " 534 printk(KERN_ERR "VFS: Quota for id %u referenced "
530 "but not present.\n", dquot->dq_id); 535 "but not present.\n", dquot->dq_id);
@@ -535,7 +540,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
535 qt_disk_dqdbheader) + i * info->dqi_entry_size; 540 qt_disk_dqdbheader) + i * info->dqi_entry_size;
536 } 541 }
537out_buf: 542out_buf:
538 freedqbuf(buf); 543 kfree(buf);
539 return ret; 544 return ret;
540} 545}
541 546
@@ -543,7 +548,7 @@ out_buf:
543static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, 548static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
544 struct dquot *dquot, uint blk, int depth) 549 struct dquot *dquot, uint blk, int depth)
545{ 550{
546 dqbuf_t buf = getdqbuf(info->dqi_usable_bs); 551 char *buf = getdqbuf(info->dqi_usable_bs);
547 loff_t ret = 0; 552 loff_t ret = 0;
548 __le32 *ref = (__le32 *)buf; 553 __le32 *ref = (__le32 *)buf;
549 554
@@ -563,7 +568,7 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
563 else 568 else
564 ret = find_block_dqentry(info, dquot, blk); 569 ret = find_block_dqentry(info, dquot, blk);
565out_buf: 570out_buf:
566 freedqbuf(buf); 571 kfree(buf);
567 return ret; 572 return ret;
568} 573}
569 574
@@ -579,7 +584,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
579 int type = dquot->dq_type; 584 int type = dquot->dq_type;
580 struct super_block *sb = dquot->dq_sb; 585 struct super_block *sb = dquot->dq_sb;
581 loff_t offset; 586 loff_t offset;
582 dqbuf_t ddquot; 587 char *ddquot;
583 int ret = 0; 588 int ret = 0;
584 589
585#ifdef __QUOTA_QT_PARANOIA 590#ifdef __QUOTA_QT_PARANOIA
@@ -607,8 +612,8 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
607 ddquot = getdqbuf(info->dqi_entry_size); 612 ddquot = getdqbuf(info->dqi_entry_size);
608 if (!ddquot) 613 if (!ddquot)
609 return -ENOMEM; 614 return -ENOMEM;
610 ret = sb->s_op->quota_read(sb, type, (char *)ddquot, 615 ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
611 info->dqi_entry_size, dquot->dq_off); 616 dquot->dq_off);
612 if (ret != info->dqi_entry_size) { 617 if (ret != info->dqi_entry_size) {
613 if (ret >= 0) 618 if (ret >= 0)
614 ret = -EIO; 619 ret = -EIO;
@@ -616,7 +621,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
616 "structure for id %u.\n", dquot->dq_id); 621 "structure for id %u.\n", dquot->dq_id);
617 set_bit(DQ_FAKE_B, &dquot->dq_flags); 622 set_bit(DQ_FAKE_B, &dquot->dq_flags);
618 memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); 623 memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
619 freedqbuf(ddquot); 624 kfree(ddquot);
620 goto out; 625 goto out;
621 } 626 }
622 spin_lock(&dq_data_lock); 627 spin_lock(&dq_data_lock);
@@ -627,7 +632,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
627 !dquot->dq_dqb.dqb_isoftlimit) 632 !dquot->dq_dqb.dqb_isoftlimit)
628 set_bit(DQ_FAKE_B, &dquot->dq_flags); 633 set_bit(DQ_FAKE_B, &dquot->dq_flags);
629 spin_unlock(&dq_data_lock); 634 spin_unlock(&dq_data_lock);
630 freedqbuf(ddquot); 635 kfree(ddquot);
631out: 636out:
632 dqstats.reads++; 637 dqstats.reads++;
633 return ret; 638 return ret;
@@ -638,7 +643,8 @@ EXPORT_SYMBOL(qtree_read_dquot);
638 * the only one operating on dquot (thanks to dq_lock) */ 643 * the only one operating on dquot (thanks to dq_lock) */
639int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) 644int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
640{ 645{
641 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) 646 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
647 !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
642 return qtree_delete_dquot(info, dquot); 648 return qtree_delete_dquot(info, dquot);
643 return 0; 649 return 0;
644} 650}
diff --git a/fs/quota_tree.h b/fs/quota/quota_tree.h
index a1ab8db81a51..a1ab8db81a51 100644
--- a/fs/quota_tree.h
+++ b/fs/quota/quota_tree.h
diff --git a/fs/quota_v1.c b/fs/quota/quota_v1.c
index b4af1c69ad16..0edcf42b1778 100644
--- a/fs/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -62,11 +62,14 @@ static int v1_read_dqblk(struct dquot *dquot)
62 62
63 /* Set structure to 0s in case read fails/is after end of file */ 63 /* Set structure to 0s in case read fails/is after end of file */
64 memset(&dqblk, 0, sizeof(struct v1_disk_dqblk)); 64 memset(&dqblk, 0, sizeof(struct v1_disk_dqblk));
65 dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); 65 dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk,
66 sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
66 67
67 v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk); 68 v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk);
68 if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 && 69 if (dquot->dq_dqb.dqb_bhardlimit == 0 &&
69 dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0) 70 dquot->dq_dqb.dqb_bsoftlimit == 0 &&
71 dquot->dq_dqb.dqb_ihardlimit == 0 &&
72 dquot->dq_dqb.dqb_isoftlimit == 0)
70 set_bit(DQ_FAKE_B, &dquot->dq_flags); 73 set_bit(DQ_FAKE_B, &dquot->dq_flags);
71 dqstats.reads++; 74 dqstats.reads++;
72 75
@@ -81,13 +84,16 @@ static int v1_commit_dqblk(struct dquot *dquot)
81 84
82 v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb); 85 v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb);
83 if (dquot->dq_id == 0) { 86 if (dquot->dq_id == 0) {
84 dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace; 87 dqblk.dqb_btime =
85 dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace; 88 sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
89 dqblk.dqb_itime =
90 sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
86 } 91 }
87 ret = 0; 92 ret = 0;
88 if (sb_dqopt(dquot->dq_sb)->files[type]) 93 if (sb_dqopt(dquot->dq_sb)->files[type])
89 ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, (char *)&dqblk, 94 ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type,
90 sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); 95 (char *)&dqblk, sizeof(struct v1_disk_dqblk),
96 v1_dqoff(dquot->dq_id));
91 if (ret != sizeof(struct v1_disk_dqblk)) { 97 if (ret != sizeof(struct v1_disk_dqblk)) {
92 printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", 98 printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
93 dquot->dq_sb->s_id); 99 dquot->dq_sb->s_id);
@@ -130,15 +136,20 @@ static int v1_check_quota_file(struct super_block *sb, int type)
130 return 0; 136 return 0;
131 blocks = isize >> BLOCK_SIZE_BITS; 137 blocks = isize >> BLOCK_SIZE_BITS;
132 off = isize & (BLOCK_SIZE - 1); 138 off = isize & (BLOCK_SIZE - 1);
133 if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk)) 139 if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) %
140 sizeof(struct v1_disk_dqblk))
134 return 0; 141 return 0;
135 /* Doublecheck whether we didn't get file with new format - with old quotactl() this could happen */ 142 /* Doublecheck whether we didn't get file with new format - with old
136 size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); 143 * quotactl() this could happen */
144 size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
145 sizeof(struct v2_disk_dqheader), 0);
137 if (size != sizeof(struct v2_disk_dqheader)) 146 if (size != sizeof(struct v2_disk_dqheader))
138 return 1; /* Probably not new format */ 147 return 1; /* Probably not new format */
139 if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type]) 148 if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type])
140 return 1; /* Definitely not new format */ 149 return 1; /* Definitely not new format */
141 printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", sb->s_id); 150 printk(KERN_INFO
151 "VFS: %s: Refusing to turn on old quota format on given file."
152 " It probably contains newer quota format.\n", sb->s_id);
142 return 0; /* Seems like a new format file -> refuse it */ 153 return 0; /* Seems like a new format file -> refuse it */
143} 154}
144 155
@@ -148,7 +159,9 @@ static int v1_read_file_info(struct super_block *sb, int type)
148 struct v1_disk_dqblk dqblk; 159 struct v1_disk_dqblk dqblk;
149 int ret; 160 int ret;
150 161
151 if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { 162 ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
163 sizeof(struct v1_disk_dqblk), v1_dqoff(0));
164 if (ret != sizeof(struct v1_disk_dqblk)) {
152 if (ret >= 0) 165 if (ret >= 0)
153 ret = -EIO; 166 ret = -EIO;
154 goto out; 167 goto out;
@@ -157,8 +170,10 @@ static int v1_read_file_info(struct super_block *sb, int type)
157 /* limits are stored as unsigned 32-bit data */ 170 /* limits are stored as unsigned 32-bit data */
158 dqopt->info[type].dqi_maxblimit = 0xffffffff; 171 dqopt->info[type].dqi_maxblimit = 0xffffffff;
159 dqopt->info[type].dqi_maxilimit = 0xffffffff; 172 dqopt->info[type].dqi_maxilimit = 0xffffffff;
160 dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME; 173 dqopt->info[type].dqi_igrace =
161 dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME; 174 dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
175 dqopt->info[type].dqi_bgrace =
176 dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
162out: 177out:
163 return ret; 178 return ret;
164} 179}
@@ -170,8 +185,9 @@ static int v1_write_file_info(struct super_block *sb, int type)
170 int ret; 185 int ret;
171 186
172 dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY; 187 dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY;
173 if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, 188 ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
174 sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { 189 sizeof(struct v1_disk_dqblk), v1_dqoff(0));
190 if (ret != sizeof(struct v1_disk_dqblk)) {
175 if (ret >= 0) 191 if (ret >= 0)
176 ret = -EIO; 192 ret = -EIO;
177 goto out; 193 goto out;
diff --git a/fs/quota_v2.c b/fs/quota/quota_v2.c
index b618b563635c..a5475fb1ae44 100644
--- a/fs/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -54,7 +54,8 @@ static int v2_check_quota_file(struct super_block *sb, int type)
54 static const uint quota_magics[] = V2_INITQMAGICS; 54 static const uint quota_magics[] = V2_INITQMAGICS;
55 static const uint quota_versions[] = V2_INITQVERSIONS; 55 static const uint quota_versions[] = V2_INITQVERSIONS;
56 56
57 size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); 57 size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
58 sizeof(struct v2_disk_dqheader), 0);
58 if (size != sizeof(struct v2_disk_dqheader)) { 59 if (size != sizeof(struct v2_disk_dqheader)) {
59 printk("quota_v2: failed read expected=%zd got=%zd\n", 60 printk("quota_v2: failed read expected=%zd got=%zd\n",
60 sizeof(struct v2_disk_dqheader), size); 61 sizeof(struct v2_disk_dqheader), size);
diff --git a/fs/quotaio_v1.h b/fs/quota/quotaio_v1.h
index 746654b5de70..746654b5de70 100644
--- a/fs/quotaio_v1.h
+++ b/fs/quota/quotaio_v1.h
diff --git a/fs/quotaio_v2.h b/fs/quota/quotaio_v2.h
index 530fe580685c..530fe580685c 100644
--- a/fs/quotaio_v2.h
+++ b/fs/quota/quotaio_v2.h
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 5d7c7ececa64..995ef1d6686c 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -18,7 +18,6 @@
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/backing-dev.h> 19#include <linux/backing-dev.h>
20#include <linux/ramfs.h> 20#include <linux/ramfs.h>
21#include <linux/quotaops.h>
22#include <linux/pagevec.h> 21#include <linux/pagevec.h>
23#include <linux/mman.h> 22#include <linux/mman.h>
24 23
@@ -205,11 +204,6 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
205 if (ret) 204 if (ret)
206 return ret; 205 return ret;
207 206
208 /* by providing our own setattr() method, we skip this quotaism */
209 if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
210 (old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid))
211 ret = DQUOT_TRANSFER(inode, ia) ? -EDQUOT : 0;
212
213 /* pick out size-changing events */ 207 /* pick out size-changing events */
214 if (ia->ia_valid & ATTR_SIZE) { 208 if (ia->ia_valid & ATTR_SIZE) {
215 loff_t size = i_size_read(inode); 209 loff_t size = i_size_read(inode);
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index 4646caa60455..f32d1425cc9f 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -430,7 +430,7 @@ static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
430 430
431 journal_mark_dirty(th, s, sbh); 431 journal_mark_dirty(th, s, sbh);
432 if (for_unformatted) 432 if (for_unformatted)
433 DQUOT_FREE_BLOCK_NODIRTY(inode, 1); 433 vfs_dq_free_block_nodirty(inode, 1);
434} 434}
435 435
436void reiserfs_free_block(struct reiserfs_transaction_handle *th, 436void reiserfs_free_block(struct reiserfs_transaction_handle *th,
@@ -1055,7 +1055,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
1055 amount_needed, hint->inode->i_uid); 1055 amount_needed, hint->inode->i_uid);
1056#endif 1056#endif
1057 quota_ret = 1057 quota_ret =
1058 DQUOT_ALLOC_BLOCK_NODIRTY(hint->inode, amount_needed); 1058 vfs_dq_alloc_block_nodirty(hint->inode, amount_needed);
1059 if (quota_ret) /* Quota exceeded? */ 1059 if (quota_ret) /* Quota exceeded? */
1060 return QUOTA_EXCEEDED; 1060 return QUOTA_EXCEEDED;
1061 if (hint->preallocate && hint->prealloc_size) { 1061 if (hint->preallocate && hint->prealloc_size) {
@@ -1064,8 +1064,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
1064 "reiserquota: allocating (prealloc) %d blocks id=%u", 1064 "reiserquota: allocating (prealloc) %d blocks id=%u",
1065 hint->prealloc_size, hint->inode->i_uid); 1065 hint->prealloc_size, hint->inode->i_uid);
1066#endif 1066#endif
1067 quota_ret = 1067 quota_ret = vfs_dq_prealloc_block_nodirty(hint->inode,
1068 DQUOT_PREALLOC_BLOCK_NODIRTY(hint->inode,
1069 hint->prealloc_size); 1068 hint->prealloc_size);
1070 if (quota_ret) 1069 if (quota_ret)
1071 hint->preallocate = hint->prealloc_size = 0; 1070 hint->preallocate = hint->prealloc_size = 0;
@@ -1098,7 +1097,10 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
1098 nr_allocated, 1097 nr_allocated,
1099 hint->inode->i_uid); 1098 hint->inode->i_uid);
1100#endif 1099#endif
1101 DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed + hint->prealloc_size - nr_allocated); /* Free not allocated blocks */ 1100 /* Free not allocated blocks */
1101 vfs_dq_free_block_nodirty(hint->inode,
1102 amount_needed + hint->prealloc_size -
1103 nr_allocated);
1102 } 1104 }
1103 while (nr_allocated--) 1105 while (nr_allocated--)
1104 reiserfs_free_block(hint->th, hint->inode, 1106 reiserfs_free_block(hint->th, hint->inode,
@@ -1129,7 +1131,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
1129 REISERFS_I(hint->inode)->i_prealloc_count, 1131 REISERFS_I(hint->inode)->i_prealloc_count,
1130 hint->inode->i_uid); 1132 hint->inode->i_uid);
1131#endif 1133#endif
1132 DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed + 1134 vfs_dq_free_block_nodirty(hint->inode, amount_needed +
1133 hint->prealloc_size - nr_allocated - 1135 hint->prealloc_size - nr_allocated -
1134 REISERFS_I(hint->inode)-> 1136 REISERFS_I(hint->inode)->
1135 i_prealloc_count); 1137 i_prealloc_count);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 55fce92cdf18..823227a7662a 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -53,7 +53,7 @@ void reiserfs_delete_inode(struct inode *inode)
53 * after delete_object so that quota updates go into the same transaction as 53 * after delete_object so that quota updates go into the same transaction as
54 * stat data deletion */ 54 * stat data deletion */
55 if (!err) 55 if (!err)
56 DQUOT_FREE_INODE(inode); 56 vfs_dq_free_inode(inode);
57 57
58 if (journal_end(&th, inode->i_sb, jbegin_count)) 58 if (journal_end(&th, inode->i_sb, jbegin_count))
59 goto out; 59 goto out;
@@ -1763,7 +1763,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1763 1763
1764 BUG_ON(!th->t_trans_id); 1764 BUG_ON(!th->t_trans_id);
1765 1765
1766 if (DQUOT_ALLOC_INODE(inode)) { 1766 if (vfs_dq_alloc_inode(inode)) {
1767 err = -EDQUOT; 1767 err = -EDQUOT;
1768 goto out_end_trans; 1768 goto out_end_trans;
1769 } 1769 }
@@ -1947,12 +1947,12 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1947 INODE_PKEY(inode)->k_objectid = 0; 1947 INODE_PKEY(inode)->k_objectid = 0;
1948 1948
1949 /* Quota change must be inside a transaction for journaling */ 1949 /* Quota change must be inside a transaction for journaling */
1950 DQUOT_FREE_INODE(inode); 1950 vfs_dq_free_inode(inode);
1951 1951
1952 out_end_trans: 1952 out_end_trans:
1953 journal_end(th, th->t_super, th->t_blocks_allocated); 1953 journal_end(th, th->t_super, th->t_blocks_allocated);
1954 /* Drop can be outside and it needs more credits so it's better to have it outside */ 1954 /* Drop can be outside and it needs more credits so it's better to have it outside */
1955 DQUOT_DROP(inode); 1955 vfs_dq_drop(inode);
1956 inode->i_flags |= S_NOQUOTA; 1956 inode->i_flags |= S_NOQUOTA;
1957 make_bad_inode(inode); 1957 make_bad_inode(inode);
1958 1958
@@ -3119,7 +3119,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
3119 if (error) 3119 if (error)
3120 goto out; 3120 goto out;
3121 error = 3121 error =
3122 DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; 3122 vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
3123 if (error) { 3123 if (error) {
3124 journal_end(&th, inode->i_sb, 3124 journal_end(&th, inode->i_sb,
3125 jbegin_count); 3125 jbegin_count);
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 738967f6c8ee..639d635d9d4b 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -555,7 +555,7 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
555*/ 555*/
556static int drop_new_inode(struct inode *inode) 556static int drop_new_inode(struct inode *inode)
557{ 557{
558 DQUOT_DROP(inode); 558 vfs_dq_drop(inode);
559 make_bad_inode(inode); 559 make_bad_inode(inode);
560 inode->i_flags |= S_NOQUOTA; 560 inode->i_flags |= S_NOQUOTA;
561 iput(inode); 561 iput(inode);
@@ -563,7 +563,7 @@ static int drop_new_inode(struct inode *inode)
563} 563}
564 564
565/* utility function that does setup for reiserfs_new_inode. 565/* utility function that does setup for reiserfs_new_inode.
566** DQUOT_INIT needs lots of credits so it's better to have it 566** vfs_dq_init needs lots of credits so it's better to have it
567** outside of a transaction, so we had to pull some bits of 567** outside of a transaction, so we had to pull some bits of
568** reiserfs_new_inode out into this func. 568** reiserfs_new_inode out into this func.
569*/ 569*/
@@ -586,7 +586,7 @@ static int new_inode_init(struct inode *inode, struct inode *dir, int mode)
586 } else { 586 } else {
587 inode->i_gid = current_fsgid(); 587 inode->i_gid = current_fsgid();
588 } 588 }
589 DQUOT_INIT(inode); 589 vfs_dq_init(inode);
590 return 0; 590 return 0;
591} 591}
592 592
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index abbc64dcc8d4..73aaa33f6735 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1297,7 +1297,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath
1297 "reiserquota delete_item(): freeing %u, id=%u type=%c", 1297 "reiserquota delete_item(): freeing %u, id=%u type=%c",
1298 quota_cut_bytes, p_s_inode->i_uid, head2type(&s_ih)); 1298 quota_cut_bytes, p_s_inode->i_uid, head2type(&s_ih));
1299#endif 1299#endif
1300 DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes); 1300 vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes);
1301 1301
1302 /* Return deleted body length */ 1302 /* Return deleted body length */
1303 return n_ret_value; 1303 return n_ret_value;
@@ -1383,7 +1383,7 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
1383 quota_cut_bytes, inode->i_uid, 1383 quota_cut_bytes, inode->i_uid,
1384 key2type(key)); 1384 key2type(key));
1385#endif 1385#endif
1386 DQUOT_FREE_SPACE_NODIRTY(inode, 1386 vfs_dq_free_space_nodirty(inode,
1387 quota_cut_bytes); 1387 quota_cut_bytes);
1388 } 1388 }
1389 break; 1389 break;
@@ -1734,7 +1734,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
1734 "reiserquota cut_from_item(): freeing %u id=%u type=%c", 1734 "reiserquota cut_from_item(): freeing %u id=%u type=%c",
1735 quota_cut_bytes, p_s_inode->i_uid, '?'); 1735 quota_cut_bytes, p_s_inode->i_uid, '?');
1736#endif 1736#endif
1737 DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes); 1737 vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes);
1738 return n_ret_value; 1738 return n_ret_value;
1739} 1739}
1740 1740
@@ -1971,7 +1971,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
1971 key2type(&(p_s_key->on_disk_key))); 1971 key2type(&(p_s_key->on_disk_key)));
1972#endif 1972#endif
1973 1973
1974 if (DQUOT_ALLOC_SPACE_NODIRTY(inode, n_pasted_size)) { 1974 if (vfs_dq_alloc_space_nodirty(inode, n_pasted_size)) {
1975 pathrelse(p_s_search_path); 1975 pathrelse(p_s_search_path);
1976 return -EDQUOT; 1976 return -EDQUOT;
1977 } 1977 }
@@ -2027,7 +2027,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
2027 n_pasted_size, inode->i_uid, 2027 n_pasted_size, inode->i_uid,
2028 key2type(&(p_s_key->on_disk_key))); 2028 key2type(&(p_s_key->on_disk_key)));
2029#endif 2029#endif
2030 DQUOT_FREE_SPACE_NODIRTY(inode, n_pasted_size); 2030 vfs_dq_free_space_nodirty(inode, n_pasted_size);
2031 return retval; 2031 return retval;
2032} 2032}
2033 2033
@@ -2060,7 +2060,7 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath
2060#endif 2060#endif
2061 /* We can't dirty inode here. It would be immediately written but 2061 /* We can't dirty inode here. It would be immediately written but
2062 * appropriate stat item isn't inserted yet... */ 2062 * appropriate stat item isn't inserted yet... */
2063 if (DQUOT_ALLOC_SPACE_NODIRTY(inode, quota_bytes)) { 2063 if (vfs_dq_alloc_space_nodirty(inode, quota_bytes)) {
2064 pathrelse(p_s_path); 2064 pathrelse(p_s_path);
2065 return -EDQUOT; 2065 return -EDQUOT;
2066 } 2066 }
@@ -2112,6 +2112,6 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath
2112 quota_bytes, inode->i_uid, head2type(p_s_ih)); 2112 quota_bytes, inode->i_uid, head2type(p_s_ih));
2113#endif 2113#endif
2114 if (inode) 2114 if (inode)
2115 DQUOT_FREE_SPACE_NODIRTY(inode, quota_bytes); 2115 vfs_dq_free_space_nodirty(inode, quota_bytes);
2116 return retval; 2116 return retval;
2117} 2117}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index f3c820b75829..5dbafb739401 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -250,7 +250,7 @@ static int finish_unfinished(struct super_block *s)
250 retval = remove_save_link_only(s, &save_link_key, 0); 250 retval = remove_save_link_only(s, &save_link_key, 0);
251 continue; 251 continue;
252 } 252 }
253 DQUOT_INIT(inode); 253 vfs_dq_init(inode);
254 254
255 if (truncate && S_ISDIR(inode->i_mode)) { 255 if (truncate && S_ISDIR(inode->i_mode)) {
256 /* We got a truncate request for a dir which is impossible. 256 /* We got a truncate request for a dir which is impossible.
@@ -629,8 +629,6 @@ static const struct super_operations reiserfs_sops = {
629#ifdef CONFIG_QUOTA 629#ifdef CONFIG_QUOTA
630#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group") 630#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
631 631
632static int reiserfs_dquot_initialize(struct inode *, int);
633static int reiserfs_dquot_drop(struct inode *);
634static int reiserfs_write_dquot(struct dquot *); 632static int reiserfs_write_dquot(struct dquot *);
635static int reiserfs_acquire_dquot(struct dquot *); 633static int reiserfs_acquire_dquot(struct dquot *);
636static int reiserfs_release_dquot(struct dquot *); 634static int reiserfs_release_dquot(struct dquot *);
@@ -639,8 +637,8 @@ static int reiserfs_write_info(struct super_block *, int);
639static int reiserfs_quota_on(struct super_block *, int, int, char *, int); 637static int reiserfs_quota_on(struct super_block *, int, int, char *, int);
640 638
641static struct dquot_operations reiserfs_quota_operations = { 639static struct dquot_operations reiserfs_quota_operations = {
642 .initialize = reiserfs_dquot_initialize, 640 .initialize = dquot_initialize,
643 .drop = reiserfs_dquot_drop, 641 .drop = dquot_drop,
644 .alloc_space = dquot_alloc_space, 642 .alloc_space = dquot_alloc_space,
645 .alloc_inode = dquot_alloc_inode, 643 .alloc_inode = dquot_alloc_inode,
646 .free_space = dquot_free_space, 644 .free_space = dquot_free_space,
@@ -1896,58 +1894,6 @@ static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1896} 1894}
1897 1895
1898#ifdef CONFIG_QUOTA 1896#ifdef CONFIG_QUOTA
1899static int reiserfs_dquot_initialize(struct inode *inode, int type)
1900{
1901 struct reiserfs_transaction_handle th;
1902 int ret, err;
1903
1904 /* We may create quota structure so we need to reserve enough blocks */
1905 reiserfs_write_lock(inode->i_sb);
1906 ret =
1907 journal_begin(&th, inode->i_sb,
1908 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb));
1909 if (ret)
1910 goto out;
1911 ret = dquot_initialize(inode, type);
1912 err =
1913 journal_end(&th, inode->i_sb,
1914 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb));
1915 if (!ret && err)
1916 ret = err;
1917 out:
1918 reiserfs_write_unlock(inode->i_sb);
1919 return ret;
1920}
1921
1922static int reiserfs_dquot_drop(struct inode *inode)
1923{
1924 struct reiserfs_transaction_handle th;
1925 int ret, err;
1926
1927 /* We may delete quota structure so we need to reserve enough blocks */
1928 reiserfs_write_lock(inode->i_sb);
1929 ret =
1930 journal_begin(&th, inode->i_sb,
1931 2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
1932 if (ret) {
1933 /*
1934 * We call dquot_drop() anyway to at least release references
1935 * to quota structures so that umount does not hang.
1936 */
1937 dquot_drop(inode);
1938 goto out;
1939 }
1940 ret = dquot_drop(inode);
1941 err =
1942 journal_end(&th, inode->i_sb,
1943 2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
1944 if (!ret && err)
1945 ret = err;
1946 out:
1947 reiserfs_write_unlock(inode->i_sb);
1948 return ret;
1949}
1950
1951static int reiserfs_write_dquot(struct dquot *dquot) 1897static int reiserfs_write_dquot(struct dquot *dquot)
1952{ 1898{
1953 struct reiserfs_transaction_handle th; 1899 struct reiserfs_transaction_handle th;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index ad92461cbfc3..ae881ccd2f03 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -1136,7 +1136,7 @@ xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name)
1136 return 1; 1136 return 1;
1137} 1137}
1138 1138
1139static struct dentry_operations xattr_lookup_poison_ops = { 1139static const struct dentry_operations xattr_lookup_poison_ops = {
1140 .d_compare = xattr_lookup_poison, 1140 .d_compare = xattr_lookup_poison,
1141}; 1141};
1142 1142
diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c
index e7ddd0328ddc..3e4803b4427e 100644
--- a/fs/smbfs/dir.c
+++ b/fs/smbfs/dir.c
@@ -277,7 +277,7 @@ static int smb_hash_dentry(struct dentry *, struct qstr *);
277static int smb_compare_dentry(struct dentry *, struct qstr *, struct qstr *); 277static int smb_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
278static int smb_delete_dentry(struct dentry *); 278static int smb_delete_dentry(struct dentry *);
279 279
280static struct dentry_operations smbfs_dentry_operations = 280static const struct dentry_operations smbfs_dentry_operations =
281{ 281{
282 .d_revalidate = smb_lookup_validate, 282 .d_revalidate = smb_lookup_validate,
283 .d_hash = smb_hash_dentry, 283 .d_hash = smb_hash_dentry,
@@ -285,7 +285,7 @@ static struct dentry_operations smbfs_dentry_operations =
285 .d_delete = smb_delete_dentry, 285 .d_delete = smb_delete_dentry,
286}; 286};
287 287
288static struct dentry_operations smbfs_dentry_operations_case = 288static const struct dentry_operations smbfs_dentry_operations_case =
289{ 289{
290 .d_revalidate = smb_lookup_validate, 290 .d_revalidate = smb_lookup_validate,
291 .d_delete = smb_delete_dentry, 291 .d_delete = smb_delete_dentry,
diff --git a/fs/super.c b/fs/super.c
index dd4acb158b5e..2ba481518ba7 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -197,7 +197,7 @@ void deactivate_super(struct super_block *s)
197 if (atomic_dec_and_lock(&s->s_active, &sb_lock)) { 197 if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
198 s->s_count -= S_BIAS-1; 198 s->s_count -= S_BIAS-1;
199 spin_unlock(&sb_lock); 199 spin_unlock(&sb_lock);
200 DQUOT_OFF(s, 0); 200 vfs_dq_off(s, 0);
201 down_write(&s->s_umount); 201 down_write(&s->s_umount);
202 fs->kill_sb(s); 202 fs->kill_sb(s);
203 put_filesystem(fs); 203 put_filesystem(fs);
@@ -266,7 +266,7 @@ EXPORT_SYMBOL(unlock_super);
266void __fsync_super(struct super_block *sb) 266void __fsync_super(struct super_block *sb)
267{ 267{
268 sync_inodes_sb(sb, 0); 268 sync_inodes_sb(sb, 0);
269 DQUOT_SYNC(sb); 269 vfs_dq_sync(sb);
270 lock_super(sb); 270 lock_super(sb);
271 if (sb->s_dirt && sb->s_op->write_super) 271 if (sb->s_dirt && sb->s_op->write_super)
272 sb->s_op->write_super(sb); 272 sb->s_op->write_super(sb);
@@ -655,7 +655,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
655 mark_files_ro(sb); 655 mark_files_ro(sb);
656 else if (!fs_may_remount_ro(sb)) 656 else if (!fs_may_remount_ro(sb))
657 return -EBUSY; 657 return -EBUSY;
658 retval = DQUOT_OFF(sb, 1); 658 retval = vfs_dq_off(sb, 1);
659 if (retval < 0 && retval != -ENOSYS) 659 if (retval < 0 && retval != -ENOSYS)
660 return -EBUSY; 660 return -EBUSY;
661 } 661 }
@@ -670,7 +670,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
670 } 670 }
671 sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK); 671 sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
672 if (remount_rw) 672 if (remount_rw)
673 DQUOT_ON_REMOUNT(sb); 673 vfs_dq_quota_on_remount(sb);
674 return 0; 674 return 0;
675} 675}
676 676
@@ -838,7 +838,8 @@ int get_sb_bdev(struct file_system_type *fs_type,
838 bdev->bd_super = s; 838 bdev->bd_super = s;
839 } 839 }
840 840
841 return simple_set_mnt(mnt, s); 841 simple_set_mnt(mnt, s);
842 return 0;
842 843
843error_s: 844error_s:
844 error = PTR_ERR(s); 845 error = PTR_ERR(s);
@@ -884,7 +885,8 @@ int get_sb_nodev(struct file_system_type *fs_type,
884 return error; 885 return error;
885 } 886 }
886 s->s_flags |= MS_ACTIVE; 887 s->s_flags |= MS_ACTIVE;
887 return simple_set_mnt(mnt, s); 888 simple_set_mnt(mnt, s);
889 return 0;
888} 890}
889 891
890EXPORT_SYMBOL(get_sb_nodev); 892EXPORT_SYMBOL(get_sb_nodev);
@@ -916,7 +918,8 @@ int get_sb_single(struct file_system_type *fs_type,
916 s->s_flags |= MS_ACTIVE; 918 s->s_flags |= MS_ACTIVE;
917 } 919 }
918 do_remount_sb(s, flags, data, 0); 920 do_remount_sb(s, flags, data, 0);
919 return simple_set_mnt(mnt, s); 921 simple_set_mnt(mnt, s);
922 return 0;
920} 923}
921 924
922EXPORT_SYMBOL(get_sb_single); 925EXPORT_SYMBOL(get_sb_single);
diff --git a/fs/sync.c b/fs/sync.c
index ec95a69d17aa..7abc65fbf21d 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -25,7 +25,7 @@ static void do_sync(unsigned long wait)
25{ 25{
26 wakeup_pdflush(0); 26 wakeup_pdflush(0);
27 sync_inodes(0); /* All mappings, inodes and their blockdevs */ 27 sync_inodes(0); /* All mappings, inodes and their blockdevs */
28 DQUOT_SYNC(NULL); 28 vfs_dq_sync(NULL);
29 sync_supers(); /* Write the superblocks */ 29 sync_supers(); /* Write the superblocks */
30 sync_filesystems(0); /* Start syncing the filesystems */ 30 sync_filesystems(0); /* Start syncing the filesystems */
31 sync_filesystems(wait); /* Waitingly sync the filesystems */ 31 sync_filesystems(wait); /* Waitingly sync the filesystems */
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 66aeb4fff0c3..d88d0fac9fa5 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -302,7 +302,7 @@ static void sysfs_d_iput(struct dentry * dentry, struct inode * inode)
302 iput(inode); 302 iput(inode);
303} 303}
304 304
305static struct dentry_operations sysfs_dentry_ops = { 305static const struct dentry_operations sysfs_dentry_ops = {
306 .d_iput = sysfs_d_iput, 306 .d_iput = sysfs_d_iput,
307}; 307};
308 308
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index a1f1ef33e81c..33e047b59b8d 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -38,7 +38,7 @@ static int sysv_hash(struct dentry *dentry, struct qstr *qstr)
38 return 0; 38 return 0;
39} 39}
40 40
41struct dentry_operations sysv_dentry_operations = { 41const struct dentry_operations sysv_dentry_operations = {
42 .d_hash = sysv_hash, 42 .d_hash = sysv_hash,
43}; 43};
44 44
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index 38ebe3f85b3d..5784a318c883 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -170,7 +170,7 @@ extern const struct file_operations sysv_file_operations;
170extern const struct file_operations sysv_dir_operations; 170extern const struct file_operations sysv_dir_operations;
171extern const struct address_space_operations sysv_aops; 171extern const struct address_space_operations sysv_aops;
172extern const struct super_operations sysv_sops; 172extern const struct super_operations sysv_sops;
173extern struct dentry_operations sysv_dentry_operations; 173extern const struct dentry_operations sysv_dentry_operations;
174 174
175 175
176enum { 176enum {
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1182b66a5491..c5c98355459a 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2034,7 +2034,8 @@ static int ubifs_get_sb(struct file_system_type *fs_type, int flags,
2034 /* 'fill_super()' opens ubi again so we must close it here */ 2034 /* 'fill_super()' opens ubi again so we must close it here */
2035 ubi_close_volume(ubi); 2035 ubi_close_volume(ubi);
2036 2036
2037 return simple_set_mnt(mnt, sb); 2037 simple_set_mnt(mnt, sb);
2038 return 0;
2038 2039
2039out_deact: 2040out_deact:
2040 up_write(&sb->s_umount); 2041 up_write(&sb->s_umount);
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 1b809bd494bd..2bb788a2acb1 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -206,7 +206,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
206 ((char *)bh->b_data)[(bit + i) >> 3]); 206 ((char *)bh->b_data)[(bit + i) >> 3]);
207 } else { 207 } else {
208 if (inode) 208 if (inode)
209 DQUOT_FREE_BLOCK(inode, 1); 209 vfs_dq_free_block(inode, 1);
210 udf_add_free_space(sbi, sbi->s_partition, 1); 210 udf_add_free_space(sbi, sbi->s_partition, 1);
211 } 211 }
212 } 212 }
@@ -261,11 +261,11 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
261 while (bit < (sb->s_blocksize << 3) && block_count > 0) { 261 while (bit < (sb->s_blocksize << 3) && block_count > 0) {
262 if (!udf_test_bit(bit, bh->b_data)) 262 if (!udf_test_bit(bit, bh->b_data))
263 goto out; 263 goto out;
264 else if (DQUOT_PREALLOC_BLOCK(inode, 1)) 264 else if (vfs_dq_prealloc_block(inode, 1))
265 goto out; 265 goto out;
266 else if (!udf_clear_bit(bit, bh->b_data)) { 266 else if (!udf_clear_bit(bit, bh->b_data)) {
267 udf_debug("bit already cleared for block %d\n", bit); 267 udf_debug("bit already cleared for block %d\n", bit);
268 DQUOT_FREE_BLOCK(inode, 1); 268 vfs_dq_free_block(inode, 1);
269 goto out; 269 goto out;
270 } 270 }
271 block_count--; 271 block_count--;
@@ -393,7 +393,7 @@ got_block:
393 /* 393 /*
394 * Check quota for allocation of this block. 394 * Check quota for allocation of this block.
395 */ 395 */
396 if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) { 396 if (inode && vfs_dq_alloc_block(inode, 1)) {
397 mutex_unlock(&sbi->s_alloc_mutex); 397 mutex_unlock(&sbi->s_alloc_mutex);
398 *err = -EDQUOT; 398 *err = -EDQUOT;
399 return 0; 399 return 0;
@@ -452,7 +452,7 @@ static void udf_table_free_blocks(struct super_block *sb,
452 /* We do this up front - There are some error conditions that 452 /* We do this up front - There are some error conditions that
453 could occure, but.. oh well */ 453 could occure, but.. oh well */
454 if (inode) 454 if (inode)
455 DQUOT_FREE_BLOCK(inode, count); 455 vfs_dq_free_block(inode, count);
456 if (udf_add_free_space(sbi, sbi->s_partition, count)) 456 if (udf_add_free_space(sbi, sbi->s_partition, count))
457 mark_buffer_dirty(sbi->s_lvid_bh); 457 mark_buffer_dirty(sbi->s_lvid_bh);
458 458
@@ -700,7 +700,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
700 epos.offset -= adsize; 700 epos.offset -= adsize;
701 701
702 alloc_count = (elen >> sb->s_blocksize_bits); 702 alloc_count = (elen >> sb->s_blocksize_bits);
703 if (inode && DQUOT_PREALLOC_BLOCK(inode, 703 if (inode && vfs_dq_prealloc_block(inode,
704 alloc_count > block_count ? block_count : alloc_count)) 704 alloc_count > block_count ? block_count : alloc_count))
705 alloc_count = 0; 705 alloc_count = 0;
706 else if (alloc_count > block_count) { 706 else if (alloc_count > block_count) {
@@ -806,7 +806,7 @@ static int udf_table_new_block(struct super_block *sb,
806 goal_eloc.logicalBlockNum++; 806 goal_eloc.logicalBlockNum++;
807 goal_elen -= sb->s_blocksize; 807 goal_elen -= sb->s_blocksize;
808 808
809 if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) { 809 if (inode && vfs_dq_alloc_block(inode, 1)) {
810 brelse(goal_epos.bh); 810 brelse(goal_epos.bh);
811 mutex_unlock(&sbi->s_alloc_mutex); 811 mutex_unlock(&sbi->s_alloc_mutex);
812 *err = -EDQUOT; 812 *err = -EDQUOT;
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 31fc84297ddb..47dbe5613f90 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -36,8 +36,8 @@ void udf_free_inode(struct inode *inode)
36 * Note: we must free any quota before locking the superblock, 36 * Note: we must free any quota before locking the superblock,
37 * as writing the quota to disk may need the lock as well. 37 * as writing the quota to disk may need the lock as well.
38 */ 38 */
39 DQUOT_FREE_INODE(inode); 39 vfs_dq_free_inode(inode);
40 DQUOT_DROP(inode); 40 vfs_dq_drop(inode);
41 41
42 clear_inode(inode); 42 clear_inode(inode);
43 43
@@ -154,8 +154,8 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
154 insert_inode_hash(inode); 154 insert_inode_hash(inode);
155 mark_inode_dirty(inode); 155 mark_inode_dirty(inode);
156 156
157 if (DQUOT_ALLOC_INODE(inode)) { 157 if (vfs_dq_alloc_inode(inode)) {
158 DQUOT_DROP(inode); 158 vfs_dq_drop(inode);
159 inode->i_flags |= S_NOQUOTA; 159 inode->i_flags |= S_NOQUOTA;
160 inode->i_nlink = 0; 160 inode->i_nlink = 0;
161 iput(inode); 161 iput(inode);
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 0d9ada173739..54c16ec95dff 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -85,7 +85,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
85 "bit already cleared for fragment %u", i); 85 "bit already cleared for fragment %u", i);
86 } 86 }
87 87
88 DQUOT_FREE_BLOCK (inode, count); 88 vfs_dq_free_block(inode, count);
89 89
90 90
91 fs32_add(sb, &ucg->cg_cs.cs_nffree, count); 91 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
@@ -195,7 +195,7 @@ do_more:
195 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 195 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
196 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 196 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
197 ufs_clusteracct (sb, ucpi, blkno, 1); 197 ufs_clusteracct (sb, ucpi, blkno, 1);
198 DQUOT_FREE_BLOCK(inode, uspi->s_fpb); 198 vfs_dq_free_block(inode, uspi->s_fpb);
199 199
200 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); 200 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
201 uspi->cs_total.cs_nbfree++; 201 uspi->cs_total.cs_nbfree++;
@@ -556,7 +556,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
556 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1); 556 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
557 for (i = oldcount; i < newcount; i++) 557 for (i = oldcount; i < newcount; i++)
558 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i); 558 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
559 if(DQUOT_ALLOC_BLOCK(inode, count)) { 559 if (vfs_dq_alloc_block(inode, count)) {
560 *err = -EDQUOT; 560 *err = -EDQUOT;
561 return 0; 561 return 0;
562 } 562 }
@@ -664,7 +664,7 @@ cg_found:
664 for (i = count; i < uspi->s_fpb; i++) 664 for (i = count; i < uspi->s_fpb; i++)
665 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); 665 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
666 i = uspi->s_fpb - count; 666 i = uspi->s_fpb - count;
667 DQUOT_FREE_BLOCK(inode, i); 667 vfs_dq_free_block(inode, i);
668 668
669 fs32_add(sb, &ucg->cg_cs.cs_nffree, i); 669 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
670 uspi->cs_total.cs_nffree += i; 670 uspi->cs_total.cs_nffree += i;
@@ -676,7 +676,7 @@ cg_found:
676 result = ufs_bitmap_search (sb, ucpi, goal, allocsize); 676 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
677 if (result == INVBLOCK) 677 if (result == INVBLOCK)
678 return 0; 678 return 0;
679 if(DQUOT_ALLOC_BLOCK(inode, count)) { 679 if (vfs_dq_alloc_block(inode, count)) {
680 *err = -EDQUOT; 680 *err = -EDQUOT;
681 return 0; 681 return 0;
682 } 682 }
@@ -747,7 +747,7 @@ gotit:
747 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 747 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
748 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 748 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
749 ufs_clusteracct (sb, ucpi, blkno, -1); 749 ufs_clusteracct (sb, ucpi, blkno, -1);
750 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) { 750 if (vfs_dq_alloc_block(inode, uspi->s_fpb)) {
751 *err = -EDQUOT; 751 *err = -EDQUOT;
752 return INVBLOCK; 752 return INVBLOCK;
753 } 753 }
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 6f5dcf006096..3527c00fef0d 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -95,8 +95,8 @@ void ufs_free_inode (struct inode * inode)
95 95
96 is_directory = S_ISDIR(inode->i_mode); 96 is_directory = S_ISDIR(inode->i_mode);
97 97
98 DQUOT_FREE_INODE(inode); 98 vfs_dq_free_inode(inode);
99 DQUOT_DROP(inode); 99 vfs_dq_drop(inode);
100 100
101 clear_inode (inode); 101 clear_inode (inode);
102 102
@@ -355,8 +355,8 @@ cg_found:
355 355
356 unlock_super (sb); 356 unlock_super (sb);
357 357
358 if (DQUOT_ALLOC_INODE(inode)) { 358 if (vfs_dq_alloc_inode(inode)) {
359 DQUOT_DROP(inode); 359 vfs_dq_drop(inode);
360 err = -EDQUOT; 360 err = -EDQUOT;
361 goto fail_without_unlock; 361 goto fail_without_unlock;
362 } 362 }
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 39f877898565..3d2512c21f05 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -622,7 +622,6 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
622 struct ufs_inode_info *ufsi = UFS_I(inode); 622 struct ufs_inode_info *ufsi = UFS_I(inode);
623 struct super_block *sb = inode->i_sb; 623 struct super_block *sb = inode->i_sb;
624 mode_t mode; 624 mode_t mode;
625 unsigned i;
626 625
627 /* 626 /*
628 * Copy data to the in-core inode. 627 * Copy data to the in-core inode.
@@ -655,11 +654,12 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
655 654
656 655
657 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { 656 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
658 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) 657 memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
659 ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i]; 658 sizeof(ufs_inode->ui_u2.ui_addr));
660 } else { 659 } else {
661 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) 660 memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
662 ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i]; 661 sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
662 ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
663 } 663 }
664 return 0; 664 return 0;
665} 665}
@@ -669,7 +669,6 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
669 struct ufs_inode_info *ufsi = UFS_I(inode); 669 struct ufs_inode_info *ufsi = UFS_I(inode);
670 struct super_block *sb = inode->i_sb; 670 struct super_block *sb = inode->i_sb;
671 mode_t mode; 671 mode_t mode;
672 unsigned i;
673 672
674 UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino); 673 UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
675 /* 674 /*
@@ -704,12 +703,12 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
704 */ 703 */
705 704
706 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { 705 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
707 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) 706 memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
708 ufsi->i_u1.u2_i_data[i] = 707 sizeof(ufs2_inode->ui_u2.ui_addr));
709 ufs2_inode->ui_u2.ui_addr.ui_db[i];
710 } else { 708 } else {
711 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) 709 memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
712 ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i]; 710 sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
711 ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
713 } 712 }
714 return 0; 713 return 0;
715} 714}
@@ -781,7 +780,6 @@ static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
781{ 780{
782 struct super_block *sb = inode->i_sb; 781 struct super_block *sb = inode->i_sb;
783 struct ufs_inode_info *ufsi = UFS_I(inode); 782 struct ufs_inode_info *ufsi = UFS_I(inode);
784 unsigned i;
785 783
786 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); 784 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
787 ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink); 785 ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
@@ -809,12 +807,12 @@ static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
809 /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */ 807 /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
810 ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0]; 808 ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
811 } else if (inode->i_blocks) { 809 } else if (inode->i_blocks) {
812 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) 810 memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
813 ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.i_data[i]; 811 sizeof(ufs_inode->ui_u2.ui_addr));
814 } 812 }
815 else { 813 else {
816 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) 814 memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
817 ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i]; 815 sizeof(ufs_inode->ui_u2.ui_symlink));
818 } 816 }
819 817
820 if (!inode->i_nlink) 818 if (!inode->i_nlink)
@@ -825,7 +823,6 @@ static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
825{ 823{
826 struct super_block *sb = inode->i_sb; 824 struct super_block *sb = inode->i_sb;
827 struct ufs_inode_info *ufsi = UFS_I(inode); 825 struct ufs_inode_info *ufsi = UFS_I(inode);
828 unsigned i;
829 826
830 UFSD("ENTER\n"); 827 UFSD("ENTER\n");
831 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); 828 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
@@ -850,11 +847,11 @@ static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
850 /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */ 847 /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
851 ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0]; 848 ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
852 } else if (inode->i_blocks) { 849 } else if (inode->i_blocks) {
853 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) 850 memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
854 ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.u2_i_data[i]; 851 sizeof(ufs_inode->ui_u2.ui_addr));
855 } else { 852 } else {
856 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) 853 memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
857 ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i]; 854 sizeof(ufs_inode->ui_u2.ui_symlink));
858 } 855 }
859 856
860 if (!inode->i_nlink) 857 if (!inode->i_nlink)
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index e3a9b1fac75a..23119fe7ad62 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -147,7 +147,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
147 } else { 147 } else {
148 /* fast symlink */ 148 /* fast symlink */
149 inode->i_op = &ufs_fast_symlink_inode_operations; 149 inode->i_op = &ufs_fast_symlink_inode_operations;
150 memcpy((char*)&UFS_I(inode)->i_u1.i_data,symname,l); 150 memcpy(UFS_I(inode)->i_u1.i_symlink, symname, l);
151 inode->i_size = l-1; 151 inode->i_size = l-1;
152 } 152 }
153 mark_inode_dirty(inode); 153 mark_inode_dirty(inode);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 261a1c2f22dd..e1c1fc5ee239 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -636,6 +636,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
636 unsigned block_size, super_block_size; 636 unsigned block_size, super_block_size;
637 unsigned flags; 637 unsigned flags;
638 unsigned super_block_offset; 638 unsigned super_block_offset;
639 unsigned maxsymlen;
639 int ret = -EINVAL; 640 int ret = -EINVAL;
640 641
641 uspi = NULL; 642 uspi = NULL;
@@ -1069,6 +1070,16 @@ magic_found:
1069 uspi->s_maxsymlinklen = 1070 uspi->s_maxsymlinklen =
1070 fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen); 1071 fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
1071 1072
1073 if (uspi->fs_magic == UFS2_MAGIC)
1074 maxsymlen = 2 * 4 * (UFS_NDADDR + UFS_NINDIR);
1075 else
1076 maxsymlen = 4 * (UFS_NDADDR + UFS_NINDIR);
1077 if (uspi->s_maxsymlinklen > maxsymlen) {
1078 ufs_warning(sb, __func__, "ufs_read_super: excessive maximum "
1079 "fast symlink size (%u)\n", uspi->s_maxsymlinklen);
1080 uspi->s_maxsymlinklen = maxsymlen;
1081 }
1082
1072 inode = ufs_iget(sb, UFS_ROOTINO); 1083 inode = ufs_iget(sb, UFS_ROOTINO);
1073 if (IS_ERR(inode)) { 1084 if (IS_ERR(inode)) {
1074 ret = PTR_ERR(inode); 1085 ret = PTR_ERR(inode);
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 11c035168ea6..69b3427d7885 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -23,7 +23,7 @@ struct ufs_sb_info {
23struct ufs_inode_info { 23struct ufs_inode_info {
24 union { 24 union {
25 __fs32 i_data[15]; 25 __fs32 i_data[15];
26 __u8 i_symlink[4*15]; 26 __u8 i_symlink[2 * 4 * 15];
27 __fs64 u2_i_data[15]; 27 __fs64 u2_i_data[15];
28 } i_u1; 28 } i_u1;
29 __u32 i_flags; 29 __u32 i_flags;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e5f4ae989abf..c19a93c3be85 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -758,6 +758,8 @@ struct drm_driver {
758 758
759 int (*proc_init)(struct drm_minor *minor); 759 int (*proc_init)(struct drm_minor *minor);
760 void (*proc_cleanup)(struct drm_minor *minor); 760 void (*proc_cleanup)(struct drm_minor *minor);
761 int (*debugfs_init)(struct drm_minor *minor);
762 void (*debugfs_cleanup)(struct drm_minor *minor);
761 763
762 /** 764 /**
763 * Driver-specific constructor for drm_gem_objects, to set up 765 * Driver-specific constructor for drm_gem_objects, to set up
@@ -793,6 +795,48 @@ struct drm_driver {
793#define DRM_MINOR_CONTROL 2 795#define DRM_MINOR_CONTROL 2
794#define DRM_MINOR_RENDER 3 796#define DRM_MINOR_RENDER 3
795 797
798
799/**
800 * debugfs node list. This structure represents a debugfs file to
801 * be created by the drm core
802 */
803struct drm_debugfs_list {
804 const char *name; /** file name */
805 int (*show)(struct seq_file*, void*); /** show callback */
806 u32 driver_features; /**< Required driver features for this entry */
807};
808
809/**
810 * debugfs node structure. This structure represents a debugfs file.
811 */
812struct drm_debugfs_node {
813 struct list_head list;
814 struct drm_minor *minor;
815 struct drm_debugfs_list *debugfs_ent;
816 struct dentry *dent;
817};
818
819/**
820 * Info file list entry. This structure represents a debugfs or proc file to
821 * be created by the drm core
822 */
823struct drm_info_list {
824 const char *name; /** file name */
825 int (*show)(struct seq_file*, void*); /** show callback */
826 u32 driver_features; /**< Required driver features for this entry */
827 void *data;
828};
829
830/**
831 * debugfs node structure. This structure represents a debugfs file.
832 */
833struct drm_info_node {
834 struct list_head list;
835 struct drm_minor *minor;
836 struct drm_info_list *info_ent;
837 struct dentry *dent;
838};
839
796/** 840/**
797 * DRM minor structure. This structure represents a drm minor number. 841 * DRM minor structure. This structure represents a drm minor number.
798 */ 842 */
@@ -802,7 +846,12 @@ struct drm_minor {
802 dev_t device; /**< Device number for mknod */ 846 dev_t device; /**< Device number for mknod */
803 struct device kdev; /**< Linux device */ 847 struct device kdev; /**< Linux device */
804 struct drm_device *dev; 848 struct drm_device *dev;
805 struct proc_dir_entry *dev_root; /**< proc directory entry */ 849
850 struct proc_dir_entry *proc_root; /**< proc directory entry */
851 struct drm_info_node proc_nodes;
852 struct dentry *debugfs_root;
853 struct drm_info_node debugfs_nodes;
854
806 struct drm_master *master; /* currently active master for this node */ 855 struct drm_master *master; /* currently active master for this node */
807 struct list_head master_list; 856 struct list_head master_list;
808 struct drm_mode_group mode_group; 857 struct drm_mode_group mode_group;
@@ -1258,6 +1307,7 @@ extern unsigned int drm_debug;
1258 1307
1259extern struct class *drm_class; 1308extern struct class *drm_class;
1260extern struct proc_dir_entry *drm_proc_root; 1309extern struct proc_dir_entry *drm_proc_root;
1310extern struct dentry *drm_debugfs_root;
1261 1311
1262extern struct idr drm_minors_idr; 1312extern struct idr drm_minors_idr;
1263 1313
@@ -1268,6 +1318,31 @@ extern int drm_proc_init(struct drm_minor *minor, int minor_id,
1268 struct proc_dir_entry *root); 1318 struct proc_dir_entry *root);
1269extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root); 1319extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
1270 1320
1321 /* Debugfs support */
1322#if defined(CONFIG_DEBUG_FS)
1323extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
1324 struct dentry *root);
1325extern int drm_debugfs_create_files(struct drm_info_list *files, int count,
1326 struct dentry *root, struct drm_minor *minor);
1327extern int drm_debugfs_remove_files(struct drm_info_list *files, int count,
1328 struct drm_minor *minor);
1329extern int drm_debugfs_cleanup(struct drm_minor *minor);
1330#endif
1331
1332 /* Info file support */
1333extern int drm_name_info(struct seq_file *m, void *data);
1334extern int drm_vm_info(struct seq_file *m, void *data);
1335extern int drm_queues_info(struct seq_file *m, void *data);
1336extern int drm_bufs_info(struct seq_file *m, void *data);
1337extern int drm_vblank_info(struct seq_file *m, void *data);
1338extern int drm_clients_info(struct seq_file *m, void* data);
1339extern int drm_gem_name_info(struct seq_file *m, void *data);
1340extern int drm_gem_object_info(struct seq_file *m, void* data);
1341
1342#if DRM_DEBUG_CODE
1343extern int drm_vma_info(struct seq_file *m, void *data);
1344#endif
1345
1271 /* Scatter Gather Support (drm_scatter.h) */ 1346 /* Scatter Gather Support (drm_scatter.h) */
1272extern void drm_sg_cleanup(struct drm_sg_mem * entry); 1347extern void drm_sg_cleanup(struct drm_sg_mem * entry);
1273extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, 1348extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 5165f240aa68..76c4c8243038 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -418,4 +418,6 @@
418 {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 418 {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
419 {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 419 {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
420 {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 420 {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
421 {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
422 {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
421 {0, 0, 0} 423 {0, 0, 0}
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index 3f0c64ace424..ecb4730d0868 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -1,6 +1,8 @@
1#ifndef BSG_H 1#ifndef BSG_H
2#define BSG_H 2#define BSG_H
3 3
4#include <linux/types.h>
5
4#define BSG_PROTOCOL_SCSI 0 6#define BSG_PROTOCOL_SCSI 0
5 7
6#define BSG_SUB_PROTOCOL_SCSI_CMD 0 8#define BSG_SUB_PROTOCOL_SCSI_CMD 0
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index bd7ac793be19..f19fd9045ea0 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -165,15 +165,8 @@ int sync_mapping_buffers(struct address_space *mapping);
165void unmap_underlying_metadata(struct block_device *bdev, sector_t block); 165void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
166 166
167void mark_buffer_async_write(struct buffer_head *bh); 167void mark_buffer_async_write(struct buffer_head *bh);
168void invalidate_bdev(struct block_device *);
169int sync_blockdev(struct block_device *bdev);
170void __wait_on_buffer(struct buffer_head *); 168void __wait_on_buffer(struct buffer_head *);
171wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); 169wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
172int fsync_bdev(struct block_device *);
173struct super_block *freeze_bdev(struct block_device *);
174int thaw_bdev(struct block_device *, struct super_block *);
175int fsync_super(struct super_block *);
176int fsync_no_super(struct block_device *);
177struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, 170struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
178 unsigned size); 171 unsigned size);
179struct buffer_head *__getblk(struct block_device *bdev, sector_t block, 172struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 3fd2194ff573..b880864672de 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -125,6 +125,13 @@ struct compat_dirent {
125 char d_name[256]; 125 char d_name[256];
126}; 126};
127 127
128struct compat_ustat {
129 compat_daddr_t f_tfree;
130 compat_ino_t f_tinode;
131 char f_fname[6];
132 char f_fpack[6];
133};
134
128typedef union compat_sigval { 135typedef union compat_sigval {
129 compat_int_t sival_int; 136 compat_int_t sival_int;
130 compat_uptr_t sival_ptr; 137 compat_uptr_t sival_ptr;
@@ -178,6 +185,7 @@ long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
178 unsigned nsems, const struct compat_timespec __user *timeout); 185 unsigned nsems, const struct compat_timespec __user *timeout);
179asmlinkage long compat_sys_keyctl(u32 option, 186asmlinkage long compat_sys_keyctl(u32 option,
180 u32 arg2, u32 arg3, u32 arg4, u32 arg5); 187 u32 arg2, u32 arg3, u32 arg4, u32 arg5);
188asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
181 189
182asmlinkage ssize_t compat_sys_readv(unsigned long fd, 190asmlinkage ssize_t compat_sys_readv(unsigned long fd,
183 const struct compat_iovec __user *vec, unsigned long vlen); 191 const struct compat_iovec __user *vec, unsigned long vlen);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index c66d22487bf8..15156364d196 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -112,7 +112,7 @@ struct dentry {
112 struct list_head d_subdirs; /* our children */ 112 struct list_head d_subdirs; /* our children */
113 struct list_head d_alias; /* inode alias list */ 113 struct list_head d_alias; /* inode alias list */
114 unsigned long d_time; /* used by d_revalidate */ 114 unsigned long d_time; /* used by d_revalidate */
115 struct dentry_operations *d_op; 115 const struct dentry_operations *d_op;
116 struct super_block *d_sb; /* The root of the dentry tree */ 116 struct super_block *d_sb; /* The root of the dentry tree */
117 void *d_fsdata; /* fs-specific data */ 117 void *d_fsdata; /* fs-specific data */
118 118
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 4d078e99c017..c6b3ca3af6df 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -25,10 +25,12 @@
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/firewire-constants.h> 26#include <linux/firewire-constants.h>
27 27
28#define FW_CDEV_EVENT_BUS_RESET 0x00 28#define FW_CDEV_EVENT_BUS_RESET 0x00
29#define FW_CDEV_EVENT_RESPONSE 0x01 29#define FW_CDEV_EVENT_RESPONSE 0x01
30#define FW_CDEV_EVENT_REQUEST 0x02 30#define FW_CDEV_EVENT_REQUEST 0x02
31#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 31#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
32#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04
33#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05
32 34
33/** 35/**
34 * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types 36 * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types
@@ -136,7 +138,24 @@ struct fw_cdev_event_request {
136 * This event is sent when the controller has completed an &fw_cdev_iso_packet 138 * This event is sent when the controller has completed an &fw_cdev_iso_packet
137 * with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers 139 * with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers
138 * stripped of all packets up until and including the interrupt packet are 140 * stripped of all packets up until and including the interrupt packet are
139 * returned in the @header field. 141 * returned in the @header field. The amount of header data per packet is as
142 * specified at iso context creation by &fw_cdev_create_iso_context.header_size.
143 *
144 * In version 1 of this ABI, header data consisted of the 1394 isochronous
145 * packet header, followed by quadlets from the packet payload if
146 * &fw_cdev_create_iso_context.header_size > 4.
147 *
148 * In version 2 of this ABI, header data consist of the 1394 isochronous
149 * packet header, followed by a timestamp quadlet if
150 * &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the
151 * packet payload if &fw_cdev_create_iso_context.header_size > 8.
152 *
153 * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2.
154 *
155 * Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel,
156 * 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp:
157 * 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte
158 * order.
140 */ 159 */
141struct fw_cdev_event_iso_interrupt { 160struct fw_cdev_event_iso_interrupt {
142 __u64 closure; 161 __u64 closure;
@@ -147,12 +166,44 @@ struct fw_cdev_event_iso_interrupt {
147}; 166};
148 167
149/** 168/**
169 * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed
170 * @closure: See &fw_cdev_event_common;
171 * set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl
172 * @type: %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
173 * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
174 * @handle: Reference by which an allocated resource can be deallocated
175 * @channel: Isochronous channel which was (de)allocated, if any
176 * @bandwidth: Bandwidth allocation units which were (de)allocated, if any
177 *
178 * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event is sent after an isochronous
179 * resource was allocated at the IRM. The client has to check @channel and
180 * @bandwidth for whether the allocation actually succeeded.
181 *
182 * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event is sent after an isochronous
183 * resource was deallocated at the IRM. It is also sent when automatic
184 * reallocation after a bus reset failed.
185 *
186 * @channel is <0 if no channel was (de)allocated or if reallocation failed.
187 * @bandwidth is 0 if no bandwidth was (de)allocated or if reallocation failed.
188 */
189struct fw_cdev_event_iso_resource {
190 __u64 closure;
191 __u32 type;
192 __u32 handle;
193 __s32 channel;
194 __s32 bandwidth;
195};
196
197/**
150 * union fw_cdev_event - Convenience union of fw_cdev_event_ types 198 * union fw_cdev_event - Convenience union of fw_cdev_event_ types
151 * @common: Valid for all types 199 * @common: Valid for all types
152 * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET 200 * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET
153 * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE 201 * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE
154 * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST 202 * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST
155 * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT 203 * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT
204 * @iso_resource: Valid if @common.type ==
205 * %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
206 * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
156 * 207 *
157 * Convenience union for userspace use. Events could be read(2) into an 208 * Convenience union for userspace use. Events could be read(2) into an
158 * appropriately aligned char buffer and then cast to this union for further 209 * appropriately aligned char buffer and then cast to this union for further
@@ -163,33 +214,47 @@ struct fw_cdev_event_iso_interrupt {
163 * not fit will be discarded so that the next read(2) will return a new event. 214 * not fit will be discarded so that the next read(2) will return a new event.
164 */ 215 */
165union fw_cdev_event { 216union fw_cdev_event {
166 struct fw_cdev_event_common common; 217 struct fw_cdev_event_common common;
167 struct fw_cdev_event_bus_reset bus_reset; 218 struct fw_cdev_event_bus_reset bus_reset;
168 struct fw_cdev_event_response response; 219 struct fw_cdev_event_response response;
169 struct fw_cdev_event_request request; 220 struct fw_cdev_event_request request;
170 struct fw_cdev_event_iso_interrupt iso_interrupt; 221 struct fw_cdev_event_iso_interrupt iso_interrupt;
222 struct fw_cdev_event_iso_resource iso_resource;
171}; 223};
172 224
173#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info) 225/* available since kernel version 2.6.22 */
174#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request) 226#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info)
175#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate) 227#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request)
176#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate) 228#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate)
177#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response) 229#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate)
178#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset) 230#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response)
179#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor) 231#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset)
180#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor) 232#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor)
233#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor)
234#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context)
235#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso)
236#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso)
237#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso)
181 238
182#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context) 239/* available since kernel version 2.6.24 */
183#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso) 240#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
184#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso)
185#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso)
186#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
187 241
188/* FW_CDEV_VERSION History 242/* available since kernel version 2.6.30 */
189 * 243#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE _IOWR('#', 0x0d, struct fw_cdev_allocate_iso_resource)
190 * 1 Feb 18, 2007: Initial version. 244#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE _IOW('#', 0x0e, struct fw_cdev_deallocate)
245#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x0f, struct fw_cdev_allocate_iso_resource)
246#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource)
247#define FW_CDEV_IOC_GET_SPEED _IO('#', 0x11) /* returns speed code */
248#define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request)
249#define FW_CDEV_IOC_SEND_STREAM_PACKET _IOW('#', 0x13, struct fw_cdev_send_stream_packet)
250
251/*
252 * FW_CDEV_VERSION History
253 * 1 (2.6.22) - initial version
254 * 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if
255 * &fw_cdev_create_iso_context.header_size is 8 or more
191 */ 256 */
192#define FW_CDEV_VERSION 1 257#define FW_CDEV_VERSION 2
193 258
194/** 259/**
195 * struct fw_cdev_get_info - General purpose information ioctl 260 * struct fw_cdev_get_info - General purpose information ioctl
@@ -201,7 +266,7 @@ union fw_cdev_event {
201 * case, @rom_length is updated with the actual length of the 266 * case, @rom_length is updated with the actual length of the
202 * configuration ROM. 267 * configuration ROM.
203 * @rom: If non-zero, address of a buffer to be filled by a copy of the 268 * @rom: If non-zero, address of a buffer to be filled by a copy of the
204 * local node's configuration ROM 269 * device's configuration ROM
205 * @bus_reset: If non-zero, address of a buffer to be filled by a 270 * @bus_reset: If non-zero, address of a buffer to be filled by a
206 * &struct fw_cdev_event_bus_reset with the current state 271 * &struct fw_cdev_event_bus_reset with the current state
207 * of the bus. This does not cause a bus reset to happen. 272 * of the bus. This does not cause a bus reset to happen.
@@ -229,7 +294,7 @@ struct fw_cdev_get_info {
229 * Send a request to the device. This ioctl implements all outgoing requests. 294 * Send a request to the device. This ioctl implements all outgoing requests.
230 * Both quadlet and block request specify the payload as a pointer to the data 295 * Both quadlet and block request specify the payload as a pointer to the data
231 * in the @data field. Once the transaction completes, the kernel writes an 296 * in the @data field. Once the transaction completes, the kernel writes an
232 * &fw_cdev_event_request event back. The @closure field is passed back to 297 * &fw_cdev_event_response event back. The @closure field is passed back to
233 * user space in the response event. 298 * user space in the response event.
234 */ 299 */
235struct fw_cdev_send_request { 300struct fw_cdev_send_request {
@@ -284,9 +349,9 @@ struct fw_cdev_allocate {
284}; 349};
285 350
286/** 351/**
287 * struct fw_cdev_deallocate - Free an address range allocation 352 * struct fw_cdev_deallocate - Free a CSR address range or isochronous resource
288 * @handle: Handle to the address range, as returned by the kernel when the 353 * @handle: Handle to the address range or iso resource, as returned by the
289 * range was allocated 354 * kernel when the range or resource was allocated
290 */ 355 */
291struct fw_cdev_deallocate { 356struct fw_cdev_deallocate {
292 __u32 handle; 357 __u32 handle;
@@ -329,6 +394,9 @@ struct fw_cdev_initiate_bus_reset {
329 * If successful, the kernel adds the descriptor and writes back a handle to the 394 * If successful, the kernel adds the descriptor and writes back a handle to the
330 * kernel-side object to be used for later removal of the descriptor block and 395 * kernel-side object to be used for later removal of the descriptor block and
331 * immediate key. 396 * immediate key.
397 *
398 * This ioctl affects the configuration ROMs of all local nodes.
399 * The ioctl only succeeds on device files which represent a local node.
332 */ 400 */
333struct fw_cdev_add_descriptor { 401struct fw_cdev_add_descriptor {
334 __u32 immediate; 402 __u32 immediate;
@@ -344,7 +412,7 @@ struct fw_cdev_add_descriptor {
344 * descriptor was added 412 * descriptor was added
345 * 413 *
346 * Remove a descriptor block and accompanying immediate key from the local 414 * Remove a descriptor block and accompanying immediate key from the local
347 * node's configuration ROM. 415 * nodes' configuration ROMs.
348 */ 416 */
349struct fw_cdev_remove_descriptor { 417struct fw_cdev_remove_descriptor {
350 __u32 handle; 418 __u32 handle;
@@ -370,6 +438,9 @@ struct fw_cdev_remove_descriptor {
370 * 438 *
371 * If a context was successfully created, the kernel writes back a handle to the 439 * If a context was successfully created, the kernel writes back a handle to the
372 * context, which must be passed in for subsequent operations on that context. 440 * context, which must be passed in for subsequent operations on that context.
441 *
442 * Note that the effect of a @header_size > 4 depends on
443 * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt.
373 */ 444 */
374struct fw_cdev_create_iso_context { 445struct fw_cdev_create_iso_context {
375 __u32 type; 446 __u32 type;
@@ -473,10 +544,91 @@ struct fw_cdev_stop_iso {
473 * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer 544 * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
474 * and also the system clock. This allows to express the receive time of an 545 * and also the system clock. This allows to express the receive time of an
475 * isochronous packet as a system time with microsecond accuracy. 546 * isochronous packet as a system time with microsecond accuracy.
547 *
548 * @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and
549 * 12 bits cycleOffset, in host byte order.
476 */ 550 */
477struct fw_cdev_get_cycle_timer { 551struct fw_cdev_get_cycle_timer {
478 __u64 local_time; 552 __u64 local_time;
479 __u32 cycle_timer; 553 __u32 cycle_timer;
480}; 554};
481 555
556/**
557 * struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth
 558 * @closure: Passed back to userspace in corresponding iso resource events
559 * @channels: Isochronous channels of which one is to be (de)allocated
560 * @bandwidth: Isochronous bandwidth units to be (de)allocated
561 * @handle: Handle to the allocation, written by the kernel (only valid in
562 * case of %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctls)
563 *
564 * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl initiates allocation of an
565 * isochronous channel and/or of isochronous bandwidth at the isochronous
566 * resource manager (IRM). Only one of the channels specified in @channels is
567 * allocated. An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED is sent after
568 * communication with the IRM, indicating success or failure in the event data.
569 * The kernel will automatically reallocate the resources after bus resets.
570 * Should a reallocation fail, an %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event
571 * will be sent. The kernel will also automatically deallocate the resources
572 * when the file descriptor is closed.
573 *
574 * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE ioctl can be used to initiate
575 * deallocation of resources which were allocated as described above.
576 * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
577 *
578 * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE ioctl is a variant of allocation
579 * without automatic re- or deallocation.
580 * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event concludes this operation,
581 * indicating success or failure in its data.
582 *
583 * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE ioctl works like
584 * %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE except that resources are freed
585 * instead of allocated.
586 * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
587 *
 588 * To summarize, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE allocates iso resources
589 * for the lifetime of the fd or handle.
590 * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources
591 * for the duration of a bus generation.
592 *
593 * @channels is a host-endian bitfield with the least significant bit
594 * representing channel 0 and the most significant bit representing channel 63:
595 * 1ULL << c for each channel c that is a candidate for (de)allocation.
596 *
597 * @bandwidth is expressed in bandwidth allocation units, i.e. the time to send
598 * one quadlet of data (payload or header data) at speed S1600.
599 */
600struct fw_cdev_allocate_iso_resource {
601 __u64 closure;
602 __u64 channels;
603 __u32 bandwidth;
604 __u32 handle;
605};
606
607/**
608 * struct fw_cdev_send_stream_packet - send an asynchronous stream packet
609 * @length: Length of outgoing payload, in bytes
610 * @tag: Data format tag
611 * @channel: Isochronous channel to transmit to
612 * @sy: Synchronization code
613 * @closure: Passed back to userspace in the response event
614 * @data: Userspace pointer to payload
615 * @generation: The bus generation where packet is valid
616 * @speed: Speed to transmit at
617 *
618 * The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet
619 * to every device which is listening to the specified channel. The kernel
620 * writes an &fw_cdev_event_response event which indicates success or failure of
621 * the transmission.
622 */
623struct fw_cdev_send_stream_packet {
624 __u32 length;
625 __u32 tag;
626 __u32 channel;
627 __u32 sy;
628 __u64 closure;
629 __u64 data;
630 __u32 generation;
631 __u32 speed;
632};
633
482#endif /* _LINUX_FIREWIRE_CDEV_H */ 634#endif /* _LINUX_FIREWIRE_CDEV_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 1cd44f727dac..42436ae42f70 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1064,34 +1064,147 @@ extern int lease_modify(struct file_lock **, int);
1064extern int lock_may_read(struct inode *, loff_t start, unsigned long count); 1064extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
1065extern int lock_may_write(struct inode *, loff_t start, unsigned long count); 1065extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
1066#else /* !CONFIG_FILE_LOCKING */ 1066#else /* !CONFIG_FILE_LOCKING */
1067#define fcntl_getlk(a, b) ({ -EINVAL; }) 1067static inline int fcntl_getlk(struct file *file, struct flock __user *user)
1068#define fcntl_setlk(a, b, c, d) ({ -EACCES; }) 1068{
1069 return -EINVAL;
1070}
1071
1072static inline int fcntl_setlk(unsigned int fd, struct file *file,
1073 unsigned int cmd, struct flock __user *user)
1074{
1075 return -EACCES;
1076}
1077
1069#if BITS_PER_LONG == 32 1078#if BITS_PER_LONG == 32
1070#define fcntl_getlk64(a, b) ({ -EINVAL; }) 1079static inline int fcntl_getlk64(struct file *file, struct flock64 __user *user)
1071#define fcntl_setlk64(a, b, c, d) ({ -EACCES; }) 1080{
1081 return -EINVAL;
1082}
1083
1084static inline int fcntl_setlk64(unsigned int fd, struct file *file,
1085 unsigned int cmd, struct flock64 __user *user)
1086{
1087 return -EACCES;
1088}
1072#endif 1089#endif
1073#define fcntl_setlease(a, b, c) ({ 0; }) 1090static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1074#define fcntl_getlease(a) ({ 0; }) 1091{
1075#define locks_init_lock(a) ({ }) 1092 return 0;
1076#define __locks_copy_lock(a, b) ({ }) 1093}
1077#define locks_copy_lock(a, b) ({ }) 1094
1078#define locks_remove_posix(a, b) ({ }) 1095static inline int fcntl_getlease(struct file *filp)
1079#define locks_remove_flock(a) ({ }) 1096{
1080#define posix_test_lock(a, b) ({ 0; }) 1097 return 0;
1081#define posix_lock_file(a, b, c) ({ -ENOLCK; }) 1098}
1082#define posix_lock_file_wait(a, b) ({ -ENOLCK; }) 1099
1083#define posix_unblock_lock(a, b) (-ENOENT) 1100static inline void locks_init_lock(struct file_lock *fl)
1084#define vfs_test_lock(a, b) ({ 0; }) 1101{
1085#define vfs_lock_file(a, b, c, d) (-ENOLCK) 1102 return;
1086#define vfs_cancel_lock(a, b) ({ 0; }) 1103}
1087#define flock_lock_file_wait(a, b) ({ -ENOLCK; }) 1104
1088#define __break_lease(a, b) ({ 0; }) 1105static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl)
1089#define lease_get_mtime(a, b) ({ }) 1106{
1090#define generic_setlease(a, b, c) ({ -EINVAL; }) 1107 return;
1091#define vfs_setlease(a, b, c) ({ -EINVAL; }) 1108}
1092#define lease_modify(a, b) ({ -EINVAL; }) 1109
1093#define lock_may_read(a, b, c) ({ 1; }) 1110static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
1094#define lock_may_write(a, b, c) ({ 1; }) 1111{
1112 return;
1113}
1114
1115static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
1116{
1117 return;
1118}
1119
1120static inline void locks_remove_flock(struct file *filp)
1121{
1122 return;
1123}
1124
1125static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
1126{
1127 return;
1128}
1129
1130static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
1131 struct file_lock *conflock)
1132{
1133 return -ENOLCK;
1134}
1135
1136static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1137{
1138 return -ENOLCK;
1139}
1140
1141static inline int posix_unblock_lock(struct file *filp,
1142 struct file_lock *waiter)
1143{
1144 return -ENOENT;
1145}
1146
1147static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
1148{
1149 return 0;
1150}
1151
1152static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
1153 struct file_lock *fl, struct file_lock *conf)
1154{
1155 return -ENOLCK;
1156}
1157
1158static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
1159{
1160 return 0;
1161}
1162
1163static inline int flock_lock_file_wait(struct file *filp,
1164 struct file_lock *request)
1165{
1166 return -ENOLCK;
1167}
1168
1169static inline int __break_lease(struct inode *inode, unsigned int mode)
1170{
1171 return 0;
1172}
1173
1174static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
1175{
1176 return;
1177}
1178
1179static inline int generic_setlease(struct file *filp, long arg,
1180 struct file_lock **flp)
1181{
1182 return -EINVAL;
1183}
1184
1185static inline int vfs_setlease(struct file *filp, long arg,
1186 struct file_lock **lease)
1187{
1188 return -EINVAL;
1189}
1190
1191static inline int lease_modify(struct file_lock **before, int arg)
1192{
1193 return -EINVAL;
1194}
1195
1196static inline int lock_may_read(struct inode *inode, loff_t start,
1197 unsigned long len)
1198{
1199 return 1;
1200}
1201
1202static inline int lock_may_write(struct inode *inode, loff_t start,
1203 unsigned long len)
1204{
1205 return 1;
1206}
1207
1095#endif /* !CONFIG_FILE_LOCKING */ 1208#endif /* !CONFIG_FILE_LOCKING */
1096 1209
1097 1210
@@ -1607,7 +1720,7 @@ struct super_block *sget(struct file_system_type *type,
1607extern int get_sb_pseudo(struct file_system_type *, char *, 1720extern int get_sb_pseudo(struct file_system_type *, char *,
1608 const struct super_operations *ops, unsigned long, 1721 const struct super_operations *ops, unsigned long,
1609 struct vfsmount *mnt); 1722 struct vfsmount *mnt);
1610extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); 1723extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
1611int __put_super_and_need_restart(struct super_block *sb); 1724int __put_super_and_need_restart(struct super_block *sb);
1612 1725
1613/* Alas, no aliases. Too much hassle with bringing module.h everywhere */ 1726/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
@@ -1688,13 +1801,44 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
1688 return 0; 1801 return 0;
1689} 1802}
1690#else /* !CONFIG_FILE_LOCKING */ 1803#else /* !CONFIG_FILE_LOCKING */
1691#define locks_mandatory_locked(a) ({ 0; }) 1804static inline int locks_mandatory_locked(struct inode *inode)
1692#define locks_mandatory_area(a, b, c, d, e) ({ 0; }) 1805{
1693#define __mandatory_lock(a) ({ 0; }) 1806 return 0;
1694#define mandatory_lock(a) ({ 0; }) 1807}
1695#define locks_verify_locked(a) ({ 0; }) 1808
1696#define locks_verify_truncate(a, b, c) ({ 0; }) 1809static inline int locks_mandatory_area(int rw, struct inode *inode,
1697#define break_lease(a, b) ({ 0; }) 1810 struct file *filp, loff_t offset,
1811 size_t count)
1812{
1813 return 0;
1814}
1815
1816static inline int __mandatory_lock(struct inode *inode)
1817{
1818 return 0;
1819}
1820
1821static inline int mandatory_lock(struct inode *inode)
1822{
1823 return 0;
1824}
1825
1826static inline int locks_verify_locked(struct inode *inode)
1827{
1828 return 0;
1829}
1830
1831static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
1832 size_t size)
1833{
1834 return 0;
1835}
1836
1837static inline int break_lease(struct inode *inode, unsigned int mode)
1838{
1839 return 0;
1840}
1841
1698#endif /* CONFIG_FILE_LOCKING */ 1842#endif /* CONFIG_FILE_LOCKING */
1699 1843
1700/* fs/open.c */ 1844/* fs/open.c */
@@ -1731,6 +1875,13 @@ extern void bd_set_size(struct block_device *, loff_t size);
1731extern void bd_forget(struct inode *inode); 1875extern void bd_forget(struct inode *inode);
1732extern void bdput(struct block_device *); 1876extern void bdput(struct block_device *);
1733extern struct block_device *open_by_devnum(dev_t, fmode_t); 1877extern struct block_device *open_by_devnum(dev_t, fmode_t);
1878extern void invalidate_bdev(struct block_device *);
1879extern int sync_blockdev(struct block_device *bdev);
1880extern struct super_block *freeze_bdev(struct block_device *);
1881extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
1882extern int fsync_bdev(struct block_device *);
1883extern int fsync_super(struct super_block *);
1884extern int fsync_no_super(struct block_device *);
1734#else 1885#else
1735static inline void bd_forget(struct inode *inode) {} 1886static inline void bd_forget(struct inode *inode) {}
1736#endif 1887#endif
@@ -1882,7 +2033,6 @@ static inline void allow_write_access(struct file *file)
1882 if (file) 2033 if (file)
1883 atomic_inc(&file->f_path.dentry->d_inode->i_writecount); 2034 atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
1884} 2035}
1885extern int do_pipe(int *);
1886extern int do_pipe_flags(int *, int); 2036extern int do_pipe_flags(int *, int);
1887extern struct file *create_read_pipe(struct file *f, int flags); 2037extern struct file *create_read_pipe(struct file *f, int flags);
1888extern struct file *create_write_pipe(int flags); 2038extern struct file *create_write_pipe(int flags);
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b1bb817d1427..4b501b48ce86 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -18,6 +18,22 @@
18#include <linux/types.h> 18#include <linux/types.h>
19#include <asm/byteorder.h> 19#include <asm/byteorder.h>
20 20
21/*
22 * DS bit usage
23 *
24 * TA = transmitter address
25 * RA = receiver address
26 * DA = destination address
27 * SA = source address
28 *
29 * ToDS FromDS A1(RA) A2(TA) A3 A4 Use
30 * -----------------------------------------------------------------
31 * 0 0 DA SA BSSID - IBSS/DLS
32 * 0 1 DA BSSID SA - AP -> STA
33 * 1 0 BSSID SA DA - AP <- STA
34 * 1 1 RA TA DA SA unspecified (WDS)
35 */
36
21#define FCS_LEN 4 37#define FCS_LEN 4
22 38
23#define IEEE80211_FCTL_VERS 0x0003 39#define IEEE80211_FCTL_VERS 0x0003
@@ -851,6 +867,7 @@ struct ieee80211_ht_info {
851/* Authentication algorithms */ 867/* Authentication algorithms */
852#define WLAN_AUTH_OPEN 0 868#define WLAN_AUTH_OPEN 0
853#define WLAN_AUTH_SHARED_KEY 1 869#define WLAN_AUTH_SHARED_KEY 1
870#define WLAN_AUTH_FT 2
854#define WLAN_AUTH_LEAP 128 871#define WLAN_AUTH_LEAP 128
855 872
856#define WLAN_AUTH_CHALLENGE_LEN 128 873#define WLAN_AUTH_CHALLENGE_LEN 128
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 0216e1bdbc56..cfe4fe1b7132 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -78,6 +78,7 @@
78#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ 78#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
79#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ 79#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
80#define ETH_P_TIPC 0x88CA /* TIPC */ 80#define ETH_P_TIPC 0x88CA /* TIPC */
81#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */
81#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ 82#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
82 83
83/* 84/*
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
index 60e16a551dd6..673f2209453d 100644
--- a/include/linux/if_frad.h
+++ b/include/linux/if_frad.h
@@ -153,7 +153,6 @@ struct frhdr
153 153
154struct dlci_local 154struct dlci_local
155{ 155{
156 struct net_device_stats stats;
157 struct net_device *master; 156 struct net_device *master;
158 struct net_device *slave; 157 struct net_device *slave;
159 struct dlci_conf config; 158 struct dlci_conf config;
diff --git a/include/linux/major.h b/include/linux/major.h
index 88249452b935..058ec15dd060 100644
--- a/include/linux/major.h
+++ b/include/linux/major.h
@@ -171,5 +171,6 @@
171#define VIOTAPE_MAJOR 230 171#define VIOTAPE_MAJOR 230
172 172
173#define BLOCK_EXT_MAJOR 259 173#define BLOCK_EXT_MAJOR 259
174#define SCSI_OSD_MAJOR 260 /* open-osd's OSD scsi device */
174 175
175#endif 176#endif
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index a820f816a49e..beb6ec99cfef 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -26,6 +26,7 @@
26#define TUN_MINOR 200 26#define TUN_MINOR 200
27#define MWAVE_MINOR 219 /* ACP/Mwave Modem */ 27#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
28#define MPT_MINOR 220 28#define MPT_MINOR 220
29#define MPT2SAS_MINOR 221
29#define HPET_MINOR 228 30#define HPET_MINOR 228
30#define FUSE_MINOR 229 31#define FUSE_MINOR 229
31#define KVM_MINOR 232 32#define KVM_MINOR 232
diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h
index f69e66d151cc..30b06c893944 100644
--- a/include/linux/ncp_fs.h
+++ b/include/linux/ncp_fs.h
@@ -204,7 +204,7 @@ void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
204/* linux/fs/ncpfs/dir.c */ 204/* linux/fs/ncpfs/dir.c */
205extern const struct inode_operations ncp_dir_inode_operations; 205extern const struct inode_operations ncp_dir_inode_operations;
206extern const struct file_operations ncp_dir_operations; 206extern const struct file_operations ncp_dir_operations;
207extern struct dentry_operations ncp_root_dentry_operations; 207extern const struct dentry_operations ncp_root_dentry_operations;
208int ncp_conn_logged_in(struct super_block *); 208int ncp_conn_logged_in(struct super_block *);
209int ncp_date_dos2unix(__le16 time, __le16 date); 209int ncp_date_dos2unix(__le16 time, __le16 date);
210void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date); 210void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index be3ebd7e8ce5..2e7783f4a755 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -32,6 +32,7 @@
32#ifdef __KERNEL__ 32#ifdef __KERNEL__
33#include <linux/timer.h> 33#include <linux/timer.h>
34#include <linux/delay.h> 34#include <linux/delay.h>
35#include <linux/mm.h>
35#include <asm/atomic.h> 36#include <asm/atomic.h>
36#include <asm/cache.h> 37#include <asm/cache.h>
37#include <asm/byteorder.h> 38#include <asm/byteorder.h>
@@ -593,6 +594,14 @@ struct net_device_ops {
593#define HAVE_NETDEV_POLL 594#define HAVE_NETDEV_POLL
594 void (*ndo_poll_controller)(struct net_device *dev); 595 void (*ndo_poll_controller)(struct net_device *dev);
595#endif 596#endif
597#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
598 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
599 u16 xid,
600 struct scatterlist *sgl,
601 unsigned int sgc);
602 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
603 u16 xid);
604#endif
596}; 605};
597 606
598/* 607/*
@@ -661,14 +670,17 @@ struct net_device
661#define NETIF_F_GRO 16384 /* Generic receive offload */ 670#define NETIF_F_GRO 16384 /* Generic receive offload */
662#define NETIF_F_LRO 32768 /* large receive offload */ 671#define NETIF_F_LRO 32768 /* large receive offload */
663 672
673#define NETIF_F_FCOE_CRC (1 << 24) /* FCoE CRC32 */
674
664 /* Segmentation offload features */ 675 /* Segmentation offload features */
665#define NETIF_F_GSO_SHIFT 16 676#define NETIF_F_GSO_SHIFT 16
666#define NETIF_F_GSO_MASK 0xffff0000 677#define NETIF_F_GSO_MASK 0x00ff0000
667#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) 678#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
668#define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT) 679#define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
669#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT) 680#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
670#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT) 681#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
671#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT) 682#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
683#define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
672 684
673 /* List of features with software fallbacks. */ 685 /* List of features with software fallbacks. */
674#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6) 686#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
@@ -851,6 +863,11 @@ struct net_device
851 struct dcbnl_rtnl_ops *dcbnl_ops; 863 struct dcbnl_rtnl_ops *dcbnl_ops;
852#endif 864#endif
853 865
866#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
867 /* max exchange id for FCoE LRO by ddp */
868 unsigned int fcoe_ddp_xid;
869#endif
870
854#ifdef CONFIG_COMPAT_NET_DEV_OPS 871#ifdef CONFIG_COMPAT_NET_DEV_OPS
855 struct { 872 struct {
856 int (*init)(struct net_device *dev); 873 int (*init)(struct net_device *dev);
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index adbc50a20ec2..7b1a652066c0 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -437,6 +437,29 @@ extern void xt_free_table_info(struct xt_table_info *info);
437extern void xt_table_entry_swap_rcu(struct xt_table_info *old, 437extern void xt_table_entry_swap_rcu(struct xt_table_info *old,
438 struct xt_table_info *new); 438 struct xt_table_info *new);
439 439
440/*
441 * This helper is performance critical and must be inlined
442 */
443static inline unsigned long ifname_compare_aligned(const char *_a,
444 const char *_b,
445 const char *_mask)
446{
447 const unsigned long *a = (const unsigned long *)_a;
448 const unsigned long *b = (const unsigned long *)_b;
449 const unsigned long *mask = (const unsigned long *)_mask;
450 unsigned long ret;
451
452 ret = (a[0] ^ b[0]) & mask[0];
453 if (IFNAMSIZ > sizeof(unsigned long))
454 ret |= (a[1] ^ b[1]) & mask[1];
455 if (IFNAMSIZ > 2 * sizeof(unsigned long))
456 ret |= (a[2] ^ b[2]) & mask[2];
457 if (IFNAMSIZ > 3 * sizeof(unsigned long))
458 ret |= (a[3] ^ b[3]) & mask[3];
459 BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
460 return ret;
461}
462
440#ifdef CONFIG_COMPAT 463#ifdef CONFIG_COMPAT
441#include <net/compat.h> 464#include <net/compat.h>
442 465
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index db867b04ac3c..8cc8807f77d6 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -415,7 +415,7 @@ extern const struct inode_operations nfs_dir_inode_operations;
415extern const struct inode_operations nfs3_dir_inode_operations; 415extern const struct inode_operations nfs3_dir_inode_operations;
416#endif /* CONFIG_NFS_V3 */ 416#endif /* CONFIG_NFS_V3 */
417extern const struct file_operations nfs_dir_operations; 417extern const struct file_operations nfs_dir_operations;
418extern struct dentry_operations nfs_dentry_operations; 418extern const struct dentry_operations nfs_dentry_operations;
419 419
420extern void nfs_force_lookup_revalidate(struct inode *dir); 420extern void nfs_force_lookup_revalidate(struct inode *dir);
421extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr); 421extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 2e5f00066afd..43a713fce11c 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -785,7 +785,7 @@ struct nfs_access_entry;
785 */ 785 */
786struct nfs_rpc_ops { 786struct nfs_rpc_ops {
787 u32 version; /* Protocol version */ 787 u32 version; /* Protocol version */
788 struct dentry_operations *dentry_ops; 788 const struct dentry_operations *dentry_ops;
789 const struct inode_operations *dir_inode_ops; 789 const struct inode_operations *dir_inode_ops;
790 const struct inode_operations *file_inode_ops; 790 const struct inode_operations *file_inode_ops;
791 791
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index f33aa08dd9b3..cbe8ce3bf486 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -142,6 +142,12 @@
142 * %NL80211_ATTR_IE. If the command succeeds, the requested data will be 142 * %NL80211_ATTR_IE. If the command succeeds, the requested data will be
143 * added to all specified management frames generated by 143 * added to all specified management frames generated by
144 * kernel/firmware/driver. 144 * kernel/firmware/driver.
145 * Note: This command has been removed and it is only reserved at this
146 * point to avoid re-using existing command number. The functionality this
147 * command was planned for has been provided with cleaner design with the
148 * option to specify additional IEs in NL80211_CMD_TRIGGER_SCAN,
149 * NL80211_CMD_AUTHENTICATE, NL80211_CMD_ASSOCIATE,
150 * NL80211_CMD_DEAUTHENTICATE, and NL80211_CMD_DISASSOCIATE.
145 * 151 *
146 * @NL80211_CMD_GET_SCAN: get scan results 152 * @NL80211_CMD_GET_SCAN: get scan results
147 * @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters 153 * @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters
@@ -161,6 +167,38 @@
161 * %NL80211_REG_TYPE_COUNTRY the alpha2 to which we have moved on 167 * %NL80211_REG_TYPE_COUNTRY the alpha2 to which we have moved on
162 * to (%NL80211_ATTR_REG_ALPHA2). 168 * to (%NL80211_ATTR_REG_ALPHA2).
163 * 169 *
170 * @NL80211_CMD_AUTHENTICATE: authentication request and notification.
171 * This command is used both as a command (request to authenticate) and
172 * as an event on the "mlme" multicast group indicating completion of the
173 * authentication process.
174 * When used as a command, %NL80211_ATTR_IFINDEX is used to identify the
175 * interface. %NL80211_ATTR_MAC is used to specify PeerSTAAddress (and
176 * BSSID in case of station mode). %NL80211_ATTR_SSID is used to specify
177 * the SSID (mainly for association, but is included in authentication
178 * request, too, to help BSS selection. %NL80211_ATTR_WIPHY_FREQ is used
179 * to specify the frequence of the channel in MHz. %NL80211_ATTR_AUTH_TYPE
180 * is used to specify the authentication type. %NL80211_ATTR_IE is used to
181 * define IEs (VendorSpecificInfo, but also including RSN IE and FT IEs)
182 * to be added to the frame.
183 * When used as an event, this reports reception of an Authentication
184 * frame in station and IBSS modes when the local MLME processed the
185 * frame, i.e., it was for the local STA and was received in correct
186 * state. This is similar to MLME-AUTHENTICATE.confirm primitive in the
187 * MLME SAP interface (kernel providing MLME, userspace SME). The
188 * included NL80211_ATTR_FRAME attribute contains the management frame
189 * (including both the header and frame body, but not FCS).
190 * @NL80211_CMD_ASSOCIATE: association request and notification; like
191 * NL80211_CMD_AUTHENTICATE but for Association and Reassociation
192 * (similar to MLME-ASSOCIATE.request, MLME-REASSOCIATE.request,
193 * MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives).
194 * @NL80211_CMD_DEAUTHENTICATE: deauthentication request and notification; like
195 * NL80211_CMD_AUTHENTICATE but for Deauthentication frames (similar to
196 * MLME-DEAUTHENTICATION.request and MLME-DEAUTHENTICATE.indication
197 * primitives).
198 * @NL80211_CMD_DISASSOCIATE: disassociation request and notification; like
199 * NL80211_CMD_AUTHENTICATE but for Disassociation frames (similar to
200 * MLME-DISASSOCIATE.request and MLME-DISASSOCIATE.indication primitives).
201 *
164 * @NL80211_CMD_MAX: highest used command number 202 * @NL80211_CMD_MAX: highest used command number
165 * @__NL80211_CMD_AFTER_LAST: internal use 203 * @__NL80211_CMD_AFTER_LAST: internal use
166 */ 204 */
@@ -206,7 +244,7 @@ enum nl80211_commands {
206 NL80211_CMD_GET_MESH_PARAMS, 244 NL80211_CMD_GET_MESH_PARAMS,
207 NL80211_CMD_SET_MESH_PARAMS, 245 NL80211_CMD_SET_MESH_PARAMS,
208 246
209 NL80211_CMD_SET_MGMT_EXTRA_IE, 247 NL80211_CMD_SET_MGMT_EXTRA_IE /* reserved; not used */,
210 248
211 NL80211_CMD_GET_REG, 249 NL80211_CMD_GET_REG,
212 250
@@ -217,6 +255,11 @@ enum nl80211_commands {
217 255
218 NL80211_CMD_REG_CHANGE, 256 NL80211_CMD_REG_CHANGE,
219 257
258 NL80211_CMD_AUTHENTICATE,
259 NL80211_CMD_ASSOCIATE,
260 NL80211_CMD_DEAUTHENTICATE,
261 NL80211_CMD_DISASSOCIATE,
262
220 /* add new commands above here */ 263 /* add new commands above here */
221 264
222 /* used to define NL80211_CMD_MAX below */ 265 /* used to define NL80211_CMD_MAX below */
@@ -230,8 +273,11 @@ enum nl80211_commands {
230 */ 273 */
231#define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS 274#define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS
232#define NL80211_CMD_SET_MGMT_EXTRA_IE NL80211_CMD_SET_MGMT_EXTRA_IE 275#define NL80211_CMD_SET_MGMT_EXTRA_IE NL80211_CMD_SET_MGMT_EXTRA_IE
233
234#define NL80211_CMD_REG_CHANGE NL80211_CMD_REG_CHANGE 276#define NL80211_CMD_REG_CHANGE NL80211_CMD_REG_CHANGE
277#define NL80211_CMD_AUTHENTICATE NL80211_CMD_AUTHENTICATE
278#define NL80211_CMD_ASSOCIATE NL80211_CMD_ASSOCIATE
279#define NL80211_CMD_DEAUTHENTICATE NL80211_CMD_DEAUTHENTICATE
280#define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE
235 281
236/** 282/**
237 * enum nl80211_attrs - nl80211 netlink attributes 283 * enum nl80211_attrs - nl80211 netlink attributes
@@ -349,6 +395,19 @@ enum nl80211_commands {
349 * @NL80211_ATTR_REG_TYPE: indicates the type of the regulatory domain currently 395 * @NL80211_ATTR_REG_TYPE: indicates the type of the regulatory domain currently
350 * set. This can be one of the nl80211_reg_type (%NL80211_REGDOM_TYPE_*) 396 * set. This can be one of the nl80211_reg_type (%NL80211_REGDOM_TYPE_*)
351 * 397 *
398 * @NL80211_ATTR_SUPPORTED_COMMANDS: wiphy attribute that specifies
399 * an array of command numbers (i.e. a mapping index to command number)
400 * that the driver for the given wiphy supports.
401 *
402 * @NL80211_ATTR_FRAME: frame data (binary attribute), including frame header
403 * and body, but not FCS; used, e.g., with NL80211_CMD_AUTHENTICATE and
404 * NL80211_CMD_ASSOCIATE events
405 * @NL80211_ATTR_SSID: SSID (binary attribute, 0..32 octets)
406 * @NL80211_ATTR_AUTH_TYPE: AuthenticationType, see &enum nl80211_auth_type,
407 * represented as a u32
408 * @NL80211_ATTR_REASON_CODE: ReasonCode for %NL80211_CMD_DEAUTHENTICATE and
409 * %NL80211_CMD_DISASSOCIATE, u16
410 *
352 * @NL80211_ATTR_MAX: highest attribute number currently defined 411 * @NL80211_ATTR_MAX: highest attribute number currently defined
353 * @__NL80211_ATTR_AFTER_LAST: internal use 412 * @__NL80211_ATTR_AFTER_LAST: internal use
354 */ 413 */
@@ -426,6 +485,13 @@ enum nl80211_attrs {
426 NL80211_ATTR_REG_INITIATOR, 485 NL80211_ATTR_REG_INITIATOR,
427 NL80211_ATTR_REG_TYPE, 486 NL80211_ATTR_REG_TYPE,
428 487
488 NL80211_ATTR_SUPPORTED_COMMANDS,
489
490 NL80211_ATTR_FRAME,
491 NL80211_ATTR_SSID,
492 NL80211_ATTR_AUTH_TYPE,
493 NL80211_ATTR_REASON_CODE,
494
429 /* add attributes here, update the policy in nl80211.c */ 495 /* add attributes here, update the policy in nl80211.c */
430 496
431 __NL80211_ATTR_AFTER_LAST, 497 __NL80211_ATTR_AFTER_LAST,
@@ -445,6 +511,10 @@ enum nl80211_attrs {
445#define NL80211_ATTR_IE NL80211_ATTR_IE 511#define NL80211_ATTR_IE NL80211_ATTR_IE
446#define NL80211_ATTR_REG_INITIATOR NL80211_ATTR_REG_INITIATOR 512#define NL80211_ATTR_REG_INITIATOR NL80211_ATTR_REG_INITIATOR
447#define NL80211_ATTR_REG_TYPE NL80211_ATTR_REG_TYPE 513#define NL80211_ATTR_REG_TYPE NL80211_ATTR_REG_TYPE
514#define NL80211_ATTR_FRAME NL80211_ATTR_FRAME
515#define NL80211_ATTR_SSID NL80211_ATTR_SSID
516#define NL80211_ATTR_AUTH_TYPE NL80211_ATTR_AUTH_TYPE
517#define NL80211_ATTR_REASON_CODE NL80211_ATTR_REASON_CODE
448 518
449#define NL80211_MAX_SUPP_RATES 32 519#define NL80211_MAX_SUPP_RATES 32
450#define NL80211_MAX_SUPP_REG_RULES 32 520#define NL80211_MAX_SUPP_REG_RULES 32
@@ -978,4 +1048,18 @@ enum nl80211_bss {
978 NL80211_BSS_MAX = __NL80211_BSS_AFTER_LAST - 1 1048 NL80211_BSS_MAX = __NL80211_BSS_AFTER_LAST - 1
979}; 1049};
980 1050
1051/**
1052 * enum nl80211_auth_type - AuthenticationType
1053 *
1054 * @NL80211_AUTHTYPE_OPEN_SYSTEM: Open System authentication
1055 * @NL80211_AUTHTYPE_SHARED_KEY: Shared Key authentication (WEP only)
1056 * @NL80211_AUTHTYPE_FT: Fast BSS Transition (IEEE 802.11r)
1057 * @NL80211_AUTHTYPE_NETWORK_EAP: Network EAP (some Cisco APs and mainly LEAP)
1058 */
1059enum nl80211_auth_type {
1060 NL80211_AUTHTYPE_OPEN_SYSTEM,
1061 NL80211_AUTHTYPE_SHARED_KEY,
1062 NL80211_AUTHTYPE_FT,
1063 NL80211_AUTHTYPE_NETWORK_EAP,
1064};
981#endif /* __LINUX_NL80211_H */ 1065#endif /* __LINUX_NL80211_H */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 097f410edefa..05dfa7c4fb64 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2271,6 +2271,8 @@
2271#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600 2271#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600
2272#define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff 2272#define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff
2273 2273
2274#define PCI_VENDOR_ID_QMI 0x1a32
2275
2274#define PCI_VENDOR_ID_TEKRAM 0x1de1 2276#define PCI_VENDOR_ID_TEKRAM 0x1de1
2275#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 2277#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
2276 2278
diff --git a/include/linux/quota.h b/include/linux/quota.h
index d72d5d84fde5..78c48895b12a 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -198,6 +198,7 @@ struct mem_dqblk {
198 qsize_t dqb_bhardlimit; /* absolute limit on disk blks alloc */ 198 qsize_t dqb_bhardlimit; /* absolute limit on disk blks alloc */
199 qsize_t dqb_bsoftlimit; /* preferred limit on disk blks */ 199 qsize_t dqb_bsoftlimit; /* preferred limit on disk blks */
200 qsize_t dqb_curspace; /* current used space */ 200 qsize_t dqb_curspace; /* current used space */
201 qsize_t dqb_rsvspace; /* current reserved space for delalloc*/
201 qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */ 202 qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */
202 qsize_t dqb_isoftlimit; /* preferred inode limit */ 203 qsize_t dqb_isoftlimit; /* preferred inode limit */
203 qsize_t dqb_curinodes; /* current # allocated inodes */ 204 qsize_t dqb_curinodes; /* current # allocated inodes */
@@ -276,8 +277,6 @@ struct dquot {
276 struct mem_dqblk dq_dqb; /* Diskquota usage */ 277 struct mem_dqblk dq_dqb; /* Diskquota usage */
277}; 278};
278 279
279#define NODQUOT (struct dquot *)NULL
280
281#define QUOTA_OK 0 280#define QUOTA_OK 0
282#define NO_QUOTA 1 281#define NO_QUOTA 1
283 282
@@ -308,6 +307,14 @@ struct dquot_operations {
308 int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */ 307 int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */
309 int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */ 308 int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */
310 int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */ 309 int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */
310 /* reserve quota for delayed block allocation */
311 int (*reserve_space) (struct inode *, qsize_t, int);
312 /* claim reserved quota for delayed alloc */
313 int (*claim_space) (struct inode *, qsize_t);
314 /* release rsved quota for delayed alloc */
315 void (*release_rsv) (struct inode *, qsize_t);
316 /* get reserved quota for delayed alloc */
317 qsize_t (*get_reserved_space) (struct inode *);
311}; 318};
312 319
313/* Operations handling requests from userspace */ 320/* Operations handling requests from userspace */
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 0b35b3a1be05..36353d95c8db 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -35,6 +35,11 @@ void dquot_destroy(struct dquot *dquot);
35int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc); 35int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc);
36int dquot_alloc_inode(const struct inode *inode, qsize_t number); 36int dquot_alloc_inode(const struct inode *inode, qsize_t number);
37 37
38int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc);
39int dquot_claim_space(struct inode *inode, qsize_t number);
40void dquot_release_reserved_space(struct inode *inode, qsize_t number);
41qsize_t dquot_get_reserved_space(struct inode *inode);
42
38int dquot_free_space(struct inode *inode, qsize_t number); 43int dquot_free_space(struct inode *inode, qsize_t number);
39int dquot_free_inode(const struct inode *inode, qsize_t number); 44int dquot_free_inode(const struct inode *inode, qsize_t number);
40 45
@@ -183,6 +188,16 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
183 return ret; 188 return ret;
184} 189}
185 190
191static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
192{
193 if (sb_any_quota_active(inode->i_sb)) {
194 /* Used space is updated in alloc_space() */
195 if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
196 return 1;
197 }
198 return 0;
199}
200
186static inline int vfs_dq_alloc_inode(struct inode *inode) 201static inline int vfs_dq_alloc_inode(struct inode *inode)
187{ 202{
188 if (sb_any_quota_active(inode->i_sb)) { 203 if (sb_any_quota_active(inode->i_sb)) {
@@ -193,6 +208,31 @@ static inline int vfs_dq_alloc_inode(struct inode *inode)
193 return 0; 208 return 0;
194} 209}
195 210
211/*
212 * Convert in-memory reserved quotas to real consumed quotas
213 */
214static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
215{
216 if (sb_any_quota_active(inode->i_sb)) {
217 if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
218 return 1;
219 } else
220 inode_add_bytes(inode, nr);
221
222 mark_inode_dirty(inode);
223 return 0;
224}
225
226/*
227 * Release reserved (in-memory) quotas
228 */
229static inline
230void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
231{
232 if (sb_any_quota_active(inode->i_sb))
233 inode->i_sb->dq_op->release_rsv(inode, nr);
234}
235
196static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) 236static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
197{ 237{
198 if (sb_any_quota_active(inode->i_sb)) 238 if (sb_any_quota_active(inode->i_sb))
@@ -339,6 +379,22 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
339 return 0; 379 return 0;
340} 380}
341 381
382static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
383{
384 return 0;
385}
386
387static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
388{
389 return vfs_dq_alloc_space(inode, nr);
390}
391
392static inline
393int vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
394{
395 return 0;
396}
397
342static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) 398static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
343{ 399{
344 inode_sub_bytes(inode, nr); 400 inode_sub_bytes(inode, nr);
@@ -354,67 +410,48 @@ static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
354 410
355static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr) 411static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
356{ 412{
357 return vfs_dq_prealloc_space_nodirty(inode, 413 return vfs_dq_prealloc_space_nodirty(inode, nr << inode->i_blkbits);
358 nr << inode->i_sb->s_blocksize_bits);
359} 414}
360 415
361static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr) 416static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr)
362{ 417{
363 return vfs_dq_prealloc_space(inode, 418 return vfs_dq_prealloc_space(inode, nr << inode->i_blkbits);
364 nr << inode->i_sb->s_blocksize_bits);
365} 419}
366 420
367static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr) 421static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr)
368{ 422{
369 return vfs_dq_alloc_space_nodirty(inode, 423 return vfs_dq_alloc_space_nodirty(inode, nr << inode->i_blkbits);
370 nr << inode->i_sb->s_blocksize_bits);
371} 424}
372 425
373static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr) 426static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr)
374{ 427{
375 return vfs_dq_alloc_space(inode, 428 return vfs_dq_alloc_space(inode, nr << inode->i_blkbits);
376 nr << inode->i_sb->s_blocksize_bits); 429}
430
431static inline int vfs_dq_reserve_block(struct inode *inode, qsize_t nr)
432{
433 return vfs_dq_reserve_space(inode, nr << inode->i_blkbits);
434}
435
436static inline int vfs_dq_claim_block(struct inode *inode, qsize_t nr)
437{
438 return vfs_dq_claim_space(inode, nr << inode->i_blkbits);
439}
440
441static inline
442void vfs_dq_release_reservation_block(struct inode *inode, qsize_t nr)
443{
444 vfs_dq_release_reservation_space(inode, nr << inode->i_blkbits);
377} 445}
378 446
379static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr) 447static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr)
380{ 448{
381 vfs_dq_free_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits); 449 vfs_dq_free_space_nodirty(inode, nr << inode->i_blkbits);
382} 450}
383 451
384static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr) 452static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr)
385{ 453{
386 vfs_dq_free_space(inode, nr << inode->i_sb->s_blocksize_bits); 454 vfs_dq_free_space(inode, nr << inode->i_blkbits);
387} 455}
388 456
389/*
390 * Define uppercase equivalents for compatibility with old function names
391 * Can go away when we think all users have been converted (15/04/2008)
392 */
393#define DQUOT_INIT(inode) vfs_dq_init(inode)
394#define DQUOT_DROP(inode) vfs_dq_drop(inode)
395#define DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr) \
396 vfs_dq_prealloc_space_nodirty(inode, nr)
397#define DQUOT_PREALLOC_SPACE(inode, nr) vfs_dq_prealloc_space(inode, nr)
398#define DQUOT_ALLOC_SPACE_NODIRTY(inode, nr) \
399 vfs_dq_alloc_space_nodirty(inode, nr)
400#define DQUOT_ALLOC_SPACE(inode, nr) vfs_dq_alloc_space(inode, nr)
401#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) \
402 vfs_dq_prealloc_block_nodirty(inode, nr)
403#define DQUOT_PREALLOC_BLOCK(inode, nr) vfs_dq_prealloc_block(inode, nr)
404#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) \
405 vfs_dq_alloc_block_nodirty(inode, nr)
406#define DQUOT_ALLOC_BLOCK(inode, nr) vfs_dq_alloc_block(inode, nr)
407#define DQUOT_ALLOC_INODE(inode) vfs_dq_alloc_inode(inode)
408#define DQUOT_FREE_SPACE_NODIRTY(inode, nr) \
409 vfs_dq_free_space_nodirty(inode, nr)
410#define DQUOT_FREE_SPACE(inode, nr) vfs_dq_free_space(inode, nr)
411#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) \
412 vfs_dq_free_block_nodirty(inode, nr)
413#define DQUOT_FREE_BLOCK(inode, nr) vfs_dq_free_block(inode, nr)
414#define DQUOT_FREE_INODE(inode) vfs_dq_free_inode(inode)
415#define DQUOT_TRANSFER(inode, iattr) vfs_dq_transfer(inode, iattr)
416#define DQUOT_SYNC(sb) vfs_dq_sync(sb)
417#define DQUOT_OFF(sb, remount) vfs_dq_off(sb, remount)
418#define DQUOT_ON_REMOUNT(sb) vfs_dq_quota_on_remount(sb)
419
420#endif /* _LINUX_QUOTAOPS_ */ 457#endif /* _LINUX_QUOTAOPS_ */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bb1981fd60f3..55d67300fa10 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -236,6 +236,8 @@ enum {
236 SKB_GSO_TCP_ECN = 1 << 3, 236 SKB_GSO_TCP_ECN = 1 << 3,
237 237
238 SKB_GSO_TCPV6 = 1 << 4, 238 SKB_GSO_TCPV6 = 1 << 4,
239
240 SKB_GSO_FCOE = 1 << 5,
239}; 241};
240 242
241#if BITS_PER_LONG > 32 243#if BITS_PER_LONG > 32
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 50f3fd9ff524..5389afdc1297 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -471,26 +471,6 @@ struct ieee80211_txq_params {
471 u8 aifs; 471 u8 aifs;
472}; 472};
473 473
474/**
475 * struct mgmt_extra_ie_params - Extra management frame IE parameters
476 *
477 * Used to add extra IE(s) into management frames. If the driver cannot add the
478 * requested data into all management frames of the specified subtype that are
479 * generated in kernel or firmware/hardware, it must reject the configuration
480 * call. The IE data buffer is added to the end of the specified management
481 * frame body after all other IEs. This addition is not applied to frames that
482 * are injected through a monitor interface.
483 *
484 * @subtype: Management frame subtype
485 * @ies: IE data buffer or %NULL to remove previous data
486 * @ies_len: Length of @ies in octets
487 */
488struct mgmt_extra_ie_params {
489 u8 subtype;
490 u8 *ies;
491 int ies_len;
492};
493
494/* from net/wireless.h */ 474/* from net/wireless.h */
495struct wiphy; 475struct wiphy;
496 476
@@ -559,6 +539,7 @@ enum cfg80211_signal_type {
559 * is no guarantee that these are well-formed!) 539 * is no guarantee that these are well-formed!)
560 * @len_information_elements: total length of the information elements 540 * @len_information_elements: total length of the information elements
561 * @signal: signal strength value (type depends on the wiphy's signal_type) 541 * @signal: signal strength value (type depends on the wiphy's signal_type)
542 * @hold: BSS should not expire
562 * @free_priv: function pointer to free private data 543 * @free_priv: function pointer to free private data
563 * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes 544 * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
564 */ 545 */
@@ -579,6 +560,105 @@ struct cfg80211_bss {
579}; 560};
580 561
581/** 562/**
563 * struct cfg80211_auth_request - Authentication request data
564 *
565 * This structure provides information needed to complete IEEE 802.11
566 * authentication.
567 * NOTE: This structure will likely change when more code from mac80211 is
568 * moved into cfg80211 so that non-mac80211 drivers can benefit from it, too.
569 * Before using this in a driver that does not use mac80211, it would be better
570 * to check the status of that work and better yet, volunteer to work on it.
571 *
572 * @chan: The channel to use or %NULL if not specified (auto-select based on
573 * scan results)
574 * @peer_addr: The address of the peer STA (AP BSSID in infrastructure case);
575 * this field is required to be present; if the driver wants to help with
576 * BSS selection, it should use (yet to be added) MLME event to allow user
577 * space SME to be notified of roaming candidate, so that the SME can then
578 * use the authentication request with the recommended BSSID and whatever
579 * other data may be needed for authentication/association
580 * @ssid: SSID or %NULL if not yet available
581 * @ssid_len: Length of ssid in octets
582 * @auth_type: Authentication type (algorithm)
583 * @ie: Extra IEs to add to Authentication frame or %NULL
584 * @ie_len: Length of ie buffer in octets
585 */
586struct cfg80211_auth_request {
587 struct ieee80211_channel *chan;
588 u8 *peer_addr;
589 const u8 *ssid;
590 size_t ssid_len;
591 enum nl80211_auth_type auth_type;
592 const u8 *ie;
593 size_t ie_len;
594};
595
596/**
597 * struct cfg80211_assoc_request - (Re)Association request data
598 *
599 * This structure provides information needed to complete IEEE 802.11
600 * (re)association.
601 * NOTE: This structure will likely change when more code from mac80211 is
602 * moved into cfg80211 so that non-mac80211 drivers can benefit from it, too.
603 * Before using this in a driver that does not use mac80211, it would be better
604 * to check the status of that work and better yet, volunteer to work on it.
605 *
606 * @chan: The channel to use or %NULL if not specified (auto-select based on
607 * scan results)
608 * @peer_addr: The address of the peer STA (AP BSSID); this field is required
609 * to be present and the STA must be in State 2 (authenticated) with the
610 * peer STA
611 * @ssid: SSID
612 * @ssid_len: Length of ssid in octets
613 * @ie: Extra IEs to add to (Re)Association Request frame or %NULL
614 * @ie_len: Length of ie buffer in octets
615 */
616struct cfg80211_assoc_request {
617 struct ieee80211_channel *chan;
618 u8 *peer_addr;
619 const u8 *ssid;
620 size_t ssid_len;
621 const u8 *ie;
622 size_t ie_len;
623};
624
625/**
626 * struct cfg80211_deauth_request - Deauthentication request data
627 *
628 * This structure provides information needed to complete IEEE 802.11
629 * deauthentication.
630 *
631 * @peer_addr: The address of the peer STA (AP BSSID); this field is required
632 * to be present and the STA must be authenticated with the peer STA
633 * @ie: Extra IEs to add to Deauthentication frame or %NULL
634 * @ie_len: Length of ie buffer in octets
635 */
636struct cfg80211_deauth_request {
637 u8 *peer_addr;
638 u16 reason_code;
639 const u8 *ie;
640 size_t ie_len;
641};
642
643/**
644 * struct cfg80211_disassoc_request - Disassociation request data
645 *
646 * This structure provides information needed to complete IEEE 802.11
647 * disassocation.
648 *
649 * @peer_addr: The address of the peer STA (AP BSSID); this field is required
650 * to be present and the STA must be associated with the peer STA
651 * @ie: Extra IEs to add to Disassociation frame or %NULL
652 * @ie_len: Length of ie buffer in octets
653 */
654struct cfg80211_disassoc_request {
655 u8 *peer_addr;
656 u16 reason_code;
657 const u8 *ie;
658 size_t ie_len;
659};
660
661/**
582 * struct cfg80211_ops - backend description for wireless configuration 662 * struct cfg80211_ops - backend description for wireless configuration
583 * 663 *
584 * This struct is registered by fullmac card drivers and/or wireless stacks 664 * This struct is registered by fullmac card drivers and/or wireless stacks
@@ -644,12 +724,15 @@ struct cfg80211_bss {
644 * 724 *
645 * @set_channel: Set channel 725 * @set_channel: Set channel
646 * 726 *
647 * @set_mgmt_extra_ie: Set extra IE data for management frames
648 *
649 * @scan: Request to do a scan. If returning zero, the scan request is given 727 * @scan: Request to do a scan. If returning zero, the scan request is given
650 * the driver, and will be valid until passed to cfg80211_scan_done(). 728 * the driver, and will be valid until passed to cfg80211_scan_done().
651 * For scan results, call cfg80211_inform_bss(); you can call this outside 729 * For scan results, call cfg80211_inform_bss(); you can call this outside
652 * the scan/scan_done bracket too. 730 * the scan/scan_done bracket too.
731 *
732 * @auth: Request to authenticate with the specified peer
733 * @assoc: Request to (re)associate with the specified peer
734 * @deauth: Request to deauthenticate from the specified peer
735 * @disassoc: Request to disassociate from the specified peer
653 */ 736 */
654struct cfg80211_ops { 737struct cfg80211_ops {
655 int (*suspend)(struct wiphy *wiphy); 738 int (*suspend)(struct wiphy *wiphy);
@@ -724,12 +807,17 @@ struct cfg80211_ops {
724 struct ieee80211_channel *chan, 807 struct ieee80211_channel *chan,
725 enum nl80211_channel_type channel_type); 808 enum nl80211_channel_type channel_type);
726 809
727 int (*set_mgmt_extra_ie)(struct wiphy *wiphy,
728 struct net_device *dev,
729 struct mgmt_extra_ie_params *params);
730
731 int (*scan)(struct wiphy *wiphy, struct net_device *dev, 810 int (*scan)(struct wiphy *wiphy, struct net_device *dev,
732 struct cfg80211_scan_request *request); 811 struct cfg80211_scan_request *request);
812
813 int (*auth)(struct wiphy *wiphy, struct net_device *dev,
814 struct cfg80211_auth_request *req);
815 int (*assoc)(struct wiphy *wiphy, struct net_device *dev,
816 struct cfg80211_assoc_request *req);
817 int (*deauth)(struct wiphy *wiphy, struct net_device *dev,
818 struct cfg80211_deauth_request *req);
819 int (*disassoc)(struct wiphy *wiphy, struct net_device *dev,
820 struct cfg80211_disassoc_request *req);
733}; 821};
734 822
735/* temporary wext handlers */ 823/* temporary wext handlers */
@@ -807,4 +895,67 @@ void cfg80211_put_bss(struct cfg80211_bss *bss);
807 */ 895 */
808void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); 896void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
809 897
898/**
899 * cfg80211_send_rx_auth - notification of processed authentication
900 * @dev: network device
901 * @buf: authentication frame (header + body)
902 * @len: length of the frame data
903 *
904 * This function is called whenever an authentication has been processed in
905 * station mode.
906 */
907void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len);
908
909/**
910 * cfg80211_send_rx_assoc - notification of processed association
911 * @dev: network device
912 * @buf: (re)association response frame (header + body)
913 * @len: length of the frame data
914 *
915 * This function is called whenever a (re)association response has been
916 * processed in station mode.
917 */
918void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len);
919
920/**
921 * cfg80211_send_rx_deauth - notification of processed deauthentication
922 * @dev: network device
923 * @buf: deauthentication frame (header + body)
924 * @len: length of the frame data
925 *
926 * This function is called whenever deauthentication has been processed in
927 * station mode.
928 */
929void cfg80211_send_rx_deauth(struct net_device *dev, const u8 *buf,
930 size_t len);
931
932/**
933 * cfg80211_send_rx_disassoc - notification of processed disassociation
934 * @dev: network device
935 * @buf: disassociation response frame (header + body)
936 * @len: length of the frame data
937 *
938 * This function is called whenever disassociation has been processed in
939 * station mode.
940 */
941void cfg80211_send_rx_disassoc(struct net_device *dev, const u8 *buf,
942 size_t len);
943
944/**
945 * cfg80211_hold_bss - exclude bss from expiration
946 * @bss: bss which should not expire
947 *
948 * In a case when the BSS is not updated but it shouldn't expire this
949 * function can be used to mark the BSS to be excluded from expiration.
950 */
951void cfg80211_hold_bss(struct cfg80211_bss *bss);
952
953/**
954 * cfg80211_unhold_bss - remove expiration exception from the BSS
955 * @bss: bss which can expire again
956 *
957 * This function marks the BSS to be expirable again.
958 */
959void cfg80211_unhold_bss(struct cfg80211_bss *bss);
960
810#endif /* __NET_CFG80211_H */ 961#endif /* __NET_CFG80211_H */
diff --git a/include/net/ethoc.h b/include/net/ethoc.h
new file mode 100644
index 000000000000..96f3789b27bc
--- /dev/null
+++ b/include/net/ethoc.h
@@ -0,0 +1,22 @@
1/*
2 * linux/include/net/ethoc.h
3 *
4 * Copyright (C) 2008-2009 Avionic Design GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Written by Thierry Reding <thierry.reding@avionic-design.de>
11 */
12
13#ifndef LINUX_NET_ETHOC_H
14#define LINUX_NET_ETHOC_H 1
15
16struct ethoc_platform_data {
17 u8 hwaddr[IFHWADDRLEN];
18 s8 phy_id;
19};
20
21#endif /* !LINUX_NET_ETHOC_H */
22
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 384698cb773a..23c3f3d97779 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -230,8 +230,10 @@ enum ieee80211_radiotap_type {
230 * 802.11 header and payload 230 * 802.11 header and payload
231 * (to 32-bit boundary) 231 * (to 32-bit boundary)
232 */ 232 */
233#define IEEE80211_RADIOTAP_F_BADFCS 0x40 /* bad FCS */
234
233/* For IEEE80211_RADIOTAP_RX_FLAGS */ 235/* For IEEE80211_RADIOTAP_RX_FLAGS */
234#define IEEE80211_RADIOTAP_F_RX_BADFCS 0x0001 /* frame failed crc check */ 236#define IEEE80211_RADIOTAP_F_RX_BADPLCP 0x0002 /* frame has bad PLCP */
235 237
236/* For IEEE80211_RADIOTAP_TX_FLAGS */ 238/* For IEEE80211_RADIOTAP_TX_FLAGS */
237#define IEEE80211_RADIOTAP_F_TX_FAIL 0x0001 /* failed due to excessive 239#define IEEE80211_RADIOTAP_F_TX_FAIL 0x0001 /* failed due to excessive
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 12a52efcd0d1..3b83a80e3fe0 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -93,12 +93,9 @@ struct ieee80211_ht_bss_info {
93 * enum ieee80211_max_queues - maximum number of queues 93 * enum ieee80211_max_queues - maximum number of queues
94 * 94 *
95 * @IEEE80211_MAX_QUEUES: Maximum number of regular device queues. 95 * @IEEE80211_MAX_QUEUES: Maximum number of regular device queues.
96 * @IEEE80211_MAX_AMPDU_QUEUES: Maximum number of queues usable
97 * for A-MPDU operation.
98 */ 96 */
99enum ieee80211_max_queues { 97enum ieee80211_max_queues {
100 IEEE80211_MAX_QUEUES = 16, 98 IEEE80211_MAX_QUEUES = 4,
101 IEEE80211_MAX_AMPDU_QUEUES = 16,
102}; 99};
103 100
104/** 101/**
@@ -245,6 +242,12 @@ struct ieee80211_bss_conf {
245 * @IEEE80211_TX_CTL_RATE_CTRL_PROBE: internal to mac80211, can be 242 * @IEEE80211_TX_CTL_RATE_CTRL_PROBE: internal to mac80211, can be
246 * set by rate control algorithms to indicate probe rate, will 243 * set by rate control algorithms to indicate probe rate, will
247 * be cleared for fragmented frames (except on the last fragment) 244 * be cleared for fragmented frames (except on the last fragment)
245 * @IEEE80211_TX_INTFL_RCALGO: mac80211 internal flag, do not test or
246 * set this flag in the driver; indicates that the rate control
247 * algorithm was used and should be notified of TX status
248 * @IEEE80211_TX_INTFL_NEED_TXPROCESSING: completely internal to mac80211,
249 * used to indicate that a pending frame requires TX processing before
250 * it can be sent out.
248 */ 251 */
249enum mac80211_tx_control_flags { 252enum mac80211_tx_control_flags {
250 IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0), 253 IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0),
@@ -260,6 +263,8 @@ enum mac80211_tx_control_flags {
260 IEEE80211_TX_STAT_AMPDU = BIT(10), 263 IEEE80211_TX_STAT_AMPDU = BIT(10),
261 IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11), 264 IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11),
262 IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12), 265 IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12),
266 IEEE80211_TX_INTFL_RCALGO = BIT(13),
267 IEEE80211_TX_INTFL_NEED_TXPROCESSING = BIT(14),
263}; 268};
264 269
265/** 270/**
@@ -520,12 +525,6 @@ enum ieee80211_conf_flags {
520 IEEE80211_CONF_PS = (1<<1), 525 IEEE80211_CONF_PS = (1<<1),
521}; 526};
522 527
523/* XXX: remove all this once drivers stop trying to use it */
524static inline int __deprecated __IEEE80211_CONF_SHORT_SLOT_TIME(void)
525{
526 return 0;
527}
528#define IEEE80211_CONF_SHORT_SLOT_TIME (__IEEE80211_CONF_SHORT_SLOT_TIME())
529 528
530/** 529/**
531 * enum ieee80211_conf_changed - denotes which configuration changed 530 * enum ieee80211_conf_changed - denotes which configuration changed
@@ -888,6 +887,10 @@ enum ieee80211_tkip_key_type {
888 * 887 *
889 * @IEEE80211_HW_MFP_CAPABLE: 888 * @IEEE80211_HW_MFP_CAPABLE:
890 * Hardware supports management frame protection (MFP, IEEE 802.11w). 889 * Hardware supports management frame protection (MFP, IEEE 802.11w).
890 *
891 * @IEEE80211_HW_BEACON_FILTER:
892 * Hardware supports dropping of irrelevant beacon frames to
893 * avoid waking up cpu.
891 */ 894 */
892enum ieee80211_hw_flags { 895enum ieee80211_hw_flags {
893 IEEE80211_HW_RX_INCLUDES_FCS = 1<<1, 896 IEEE80211_HW_RX_INCLUDES_FCS = 1<<1,
@@ -903,6 +906,7 @@ enum ieee80211_hw_flags {
903 IEEE80211_HW_PS_NULLFUNC_STACK = 1<<11, 906 IEEE80211_HW_PS_NULLFUNC_STACK = 1<<11,
904 IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12, 907 IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
905 IEEE80211_HW_MFP_CAPABLE = 1<<13, 908 IEEE80211_HW_MFP_CAPABLE = 1<<13,
909 IEEE80211_HW_BEACON_FILTER = 1<<14,
906}; 910};
907 911
908/** 912/**
@@ -945,12 +949,6 @@ enum ieee80211_hw_flags {
945 * data packets. WMM/QoS requires at least four, these 949 * data packets. WMM/QoS requires at least four, these
946 * queues need to have configurable access parameters. 950 * queues need to have configurable access parameters.
947 * 951 *
948 * @ampdu_queues: number of available hardware transmit queues
949 * for A-MPDU packets, these have no access parameters
950 * because they're used only for A-MPDU frames. Note that
951 * mac80211 will not currently use any of the regular queues
952 * for aggregation.
953 *
954 * @rate_control_algorithm: rate control algorithm for this hardware. 952 * @rate_control_algorithm: rate control algorithm for this hardware.
955 * If unset (NULL), the default algorithm will be used. Must be 953 * If unset (NULL), the default algorithm will be used. Must be
956 * set before calling ieee80211_register_hw(). 954 * set before calling ieee80211_register_hw().
@@ -975,7 +973,6 @@ struct ieee80211_hw {
975 int vif_data_size; 973 int vif_data_size;
976 int sta_data_size; 974 int sta_data_size;
977 u16 queues; 975 u16 queues;
978 u16 ampdu_queues;
979 u16 max_listen_interval; 976 u16 max_listen_interval;
980 s8 max_signal; 977 s8 max_signal;
981 u8 max_rates; 978 u8 max_rates;
@@ -1017,11 +1014,6 @@ static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr)
1017 memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN); 1014 memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN);
1018} 1015}
1019 1016
1020static inline int ieee80211_num_regular_queues(struct ieee80211_hw *hw)
1021{
1022 return hw->queues;
1023}
1024
1025static inline struct ieee80211_rate * 1017static inline struct ieee80211_rate *
1026ieee80211_get_tx_rate(const struct ieee80211_hw *hw, 1018ieee80211_get_tx_rate(const struct ieee80211_hw *hw,
1027 const struct ieee80211_tx_info *c) 1019 const struct ieee80211_tx_info *c)
@@ -1132,6 +1124,24 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
1132 */ 1124 */
1133 1125
1134/** 1126/**
1127 * DOC: Beacon filter support
1128 *
1129 * Some hardware have beacon filter support to reduce host cpu wakeups
1130 * which will reduce system power consumption. It usuallly works so that
1131 * the firmware creates a checksum of the beacon but omits all constantly
1132 * changing elements (TSF, TIM etc). Whenever the checksum changes the
1133 * beacon is forwarded to the host, otherwise it will be just dropped. That
1134 * way the host will only receive beacons where some relevant information
1135 * (for example ERP protection or WMM settings) have changed.
1136 *
1137 * Beacon filter support is informed with %IEEE80211_HW_BEACON_FILTER flag.
1138 * The driver needs to enable beacon filter support whenever power save is
1139 * enabled, that is %IEEE80211_CONF_PS is set. When power save is enabled,
1140 * the stack will not check for beacon miss at all and the driver needs to
1141 * notify about complete loss of beacons with ieee80211_beacon_loss().
1142 */
1143
1144/**
1135 * DOC: Frame filtering 1145 * DOC: Frame filtering
1136 * 1146 *
1137 * mac80211 requires to see many management frames for proper 1147 * mac80211 requires to see many management frames for proper
@@ -1220,14 +1230,14 @@ enum ieee80211_filter_flags {
1220 * @IEEE80211_AMPDU_RX_STOP: stop Rx aggregation 1230 * @IEEE80211_AMPDU_RX_STOP: stop Rx aggregation
1221 * @IEEE80211_AMPDU_TX_START: start Tx aggregation 1231 * @IEEE80211_AMPDU_TX_START: start Tx aggregation
1222 * @IEEE80211_AMPDU_TX_STOP: stop Tx aggregation 1232 * @IEEE80211_AMPDU_TX_STOP: stop Tx aggregation
1223 * @IEEE80211_AMPDU_TX_RESUME: resume TX aggregation 1233 * @IEEE80211_AMPDU_TX_OPERATIONAL: TX aggregation has become operational
1224 */ 1234 */
1225enum ieee80211_ampdu_mlme_action { 1235enum ieee80211_ampdu_mlme_action {
1226 IEEE80211_AMPDU_RX_START, 1236 IEEE80211_AMPDU_RX_START,
1227 IEEE80211_AMPDU_RX_STOP, 1237 IEEE80211_AMPDU_RX_STOP,
1228 IEEE80211_AMPDU_TX_START, 1238 IEEE80211_AMPDU_TX_START,
1229 IEEE80211_AMPDU_TX_STOP, 1239 IEEE80211_AMPDU_TX_STOP,
1230 IEEE80211_AMPDU_TX_RESUME, 1240 IEEE80211_AMPDU_TX_OPERATIONAL,
1231}; 1241};
1232 1242
1233/** 1243/**
@@ -1318,11 +1328,13 @@ enum ieee80211_ampdu_mlme_action {
1318 * 1328 *
1319 * @hw_scan: Ask the hardware to service the scan request, no need to start 1329 * @hw_scan: Ask the hardware to service the scan request, no need to start
1320 * the scan state machine in stack. The scan must honour the channel 1330 * the scan state machine in stack. The scan must honour the channel
1321 * configuration done by the regulatory agent in the wiphy's registered 1331 * configuration done by the regulatory agent in the wiphy's
1322 * bands. When the scan finishes, ieee80211_scan_completed() must be 1332 * registered bands. The hardware (or the driver) needs to make sure
1323 * called; note that it also must be called when the scan cannot finish 1333 * that power save is disabled. When the scan finishes,
1324 * because the hardware is turned off! Anything else is a bug! 1334 * ieee80211_scan_completed() must be called; note that it also must
1325 * Returns a negative error code which will be seen in userspace. 1335 * be called when the scan cannot finish because the hardware is
1336 * turned off! Anything else is a bug! Returns a negative error code
1337 * which will be seen in userspace.
1326 * 1338 *
1327 * @sw_scan_start: Notifier function that is called just before a software scan 1339 * @sw_scan_start: Notifier function that is called just before a software scan
1328 * is started. Can be NULL, if the driver doesn't need this notification. 1340 * is started. Can be NULL, if the driver doesn't need this notification.
@@ -1350,8 +1362,8 @@ enum ieee80211_ampdu_mlme_action {
1350 * @get_tx_stats: Get statistics of the current TX queue status. This is used 1362 * @get_tx_stats: Get statistics of the current TX queue status. This is used
1351 * to get number of currently queued packets (queue length), maximum queue 1363 * to get number of currently queued packets (queue length), maximum queue
1352 * size (limit), and total number of packets sent using each TX queue 1364 * size (limit), and total number of packets sent using each TX queue
1353 * (count). The 'stats' pointer points to an array that has hw->queues + 1365 * (count). The 'stats' pointer points to an array that has hw->queues
1354 * hw->ampdu_queues items. 1366 * items.
1355 * 1367 *
1356 * @get_tsf: Get the current TSF timer value from firmware/hardware. Currently, 1368 * @get_tsf: Get the current TSF timer value from firmware/hardware. Currently,
1357 * this is only used for IBSS mode BSSID merging and debugging. Is not a 1369 * this is only used for IBSS mode BSSID merging and debugging. Is not a
@@ -1979,6 +1991,16 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, const u8 *ra,
1979struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw, 1991struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw,
1980 const u8 *addr); 1992 const u8 *addr);
1981 1993
1994/**
1995 * ieee80211_beacon_loss - inform hardware does not receive beacons
1996 *
1997 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf.
1998 *
1999 * When beacon filtering is enabled with IEEE80211_HW_BEACON_FILTERING and
2000 * IEEE80211_CONF_PS is set, the driver needs to inform whenever the
2001 * hardware is not receiving beacons with this function.
2002 */
2003void ieee80211_beacon_loss(struct ieee80211_vif *vif);
1982 2004
1983/* Rate control API */ 2005/* Rate control API */
1984 2006
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 4dfb793c3f15..6c3f964de9e1 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -91,8 +91,7 @@ struct nf_conn_help {
91#include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 91#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
92#include <net/netfilter/ipv6/nf_conntrack_ipv6.h> 92#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
93 93
94struct nf_conn 94struct nf_conn {
95{
96 /* Usage count in here is 1 for hash table/destruct timer, 1 per skb, 95 /* Usage count in here is 1 for hash table/destruct timer, 1 per skb,
97 plus 1 for any connection(s) we are `master' for */ 96 plus 1 for any connection(s) we are `master' for */
98 struct nf_conntrack ct_general; 97 struct nf_conntrack ct_general;
@@ -126,7 +125,6 @@ struct nf_conn
126#ifdef CONFIG_NET_NS 125#ifdef CONFIG_NET_NS
127 struct net *ct_net; 126 struct net *ct_net;
128#endif 127#endif
129 struct rcu_head rcu;
130}; 128};
131 129
132static inline struct nf_conn * 130static inline struct nf_conn *
@@ -190,9 +188,13 @@ static inline void nf_ct_put(struct nf_conn *ct)
190extern int nf_ct_l3proto_try_module_get(unsigned short l3proto); 188extern int nf_ct_l3proto_try_module_get(unsigned short l3proto);
191extern void nf_ct_l3proto_module_put(unsigned short l3proto); 189extern void nf_ct_l3proto_module_put(unsigned short l3proto);
192 190
193extern struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced); 191/*
194extern void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, 192 * Allocate a hashtable of hlist_head (if nulls == 0),
195 unsigned int size); 193 * or hlist_nulls_head (if nulls == 1)
194 */
195extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls);
196
197extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size);
196 198
197extern struct nf_conntrack_tuple_hash * 199extern struct nf_conntrack_tuple_hash *
198__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple); 200__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple);
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 66d65a7caa39..ee2a4b369a04 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -14,6 +14,8 @@
14 14
15struct module; 15struct module;
16 16
17#define NF_CT_HELPER_NAME_LEN 16
18
17struct nf_conntrack_helper 19struct nf_conntrack_helper
18{ 20{
19 struct hlist_node hnode; /* Internal use. */ 21 struct hlist_node hnode; /* Internal use. */
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 0378676c3dd8..9f99d36d5de9 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -53,10 +53,17 @@ struct nf_conntrack_l3proto
53 int (*tuple_to_nlattr)(struct sk_buff *skb, 53 int (*tuple_to_nlattr)(struct sk_buff *skb,
54 const struct nf_conntrack_tuple *t); 54 const struct nf_conntrack_tuple *t);
55 55
56 /*
57 * Calculate size of tuple nlattr
58 */
59 int (*nlattr_tuple_size)(void);
60
56 int (*nlattr_to_tuple)(struct nlattr *tb[], 61 int (*nlattr_to_tuple)(struct nlattr *tb[],
57 struct nf_conntrack_tuple *t); 62 struct nf_conntrack_tuple *t);
58 const struct nla_policy *nla_policy; 63 const struct nla_policy *nla_policy;
59 64
65 size_t nla_size;
66
60#ifdef CONFIG_SYSCTL 67#ifdef CONFIG_SYSCTL
61 struct ctl_table_header *ctl_table_header; 68 struct ctl_table_header *ctl_table_header;
62 struct ctl_path *ctl_table_path; 69 struct ctl_path *ctl_table_path;
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index b01070bf2f84..ba32ed7bdabe 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -64,16 +64,22 @@ struct nf_conntrack_l4proto
64 /* convert protoinfo to nfnetink attributes */ 64 /* convert protoinfo to nfnetink attributes */
65 int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla, 65 int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
66 const struct nf_conn *ct); 66 const struct nf_conn *ct);
67 /* Calculate protoinfo nlattr size */
68 int (*nlattr_size)(void);
67 69
68 /* convert nfnetlink attributes to protoinfo */ 70 /* convert nfnetlink attributes to protoinfo */
69 int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct); 71 int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct);
70 72
71 int (*tuple_to_nlattr)(struct sk_buff *skb, 73 int (*tuple_to_nlattr)(struct sk_buff *skb,
72 const struct nf_conntrack_tuple *t); 74 const struct nf_conntrack_tuple *t);
75 /* Calculate tuple nlattr size */
76 int (*nlattr_tuple_size)(void);
73 int (*nlattr_to_tuple)(struct nlattr *tb[], 77 int (*nlattr_to_tuple)(struct nlattr *tb[],
74 struct nf_conntrack_tuple *t); 78 struct nf_conntrack_tuple *t);
75 const struct nla_policy *nla_policy; 79 const struct nla_policy *nla_policy;
76 80
81 size_t nla_size;
82
77#ifdef CONFIG_SYSCTL 83#ifdef CONFIG_SYSCTL
78 struct ctl_table_header **ctl_table_header; 84 struct ctl_table_header **ctl_table_header;
79 struct ctl_table *ctl_table; 85 struct ctl_table *ctl_table;
@@ -107,6 +113,7 @@ extern int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
107 const struct nf_conntrack_tuple *tuple); 113 const struct nf_conntrack_tuple *tuple);
108extern int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], 114extern int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
109 struct nf_conntrack_tuple *t); 115 struct nf_conntrack_tuple *t);
116extern int nf_ct_port_nlattr_tuple_size(void);
110extern const struct nla_policy nf_ct_port_nla_policy[]; 117extern const struct nla_policy nf_ct_port_nla_policy[];
111 118
112#ifdef CONFIG_SYSCTL 119#ifdef CONFIG_SYSCTL
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index f2f6aa73dc10..2628c154d40e 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -12,6 +12,7 @@
12 12
13#include <linux/netfilter/x_tables.h> 13#include <linux/netfilter/x_tables.h>
14#include <linux/netfilter/nf_conntrack_tuple_common.h> 14#include <linux/netfilter/nf_conntrack_tuple_common.h>
15#include <linux/list_nulls.h>
15 16
16/* A `tuple' is a structure containing the information to uniquely 17/* A `tuple' is a structure containing the information to uniquely
17 identify a connection. ie. if two packets have the same tuple, they 18 identify a connection. ie. if two packets have the same tuple, they
@@ -146,9 +147,8 @@ static inline void nf_ct_dump_tuple(const struct nf_conntrack_tuple *t)
146 ((enum ip_conntrack_dir)(h)->tuple.dst.dir) 147 ((enum ip_conntrack_dir)(h)->tuple.dst.dir)
147 148
148/* Connections have two entries in the hash table: one for each way */ 149/* Connections have two entries in the hash table: one for each way */
149struct nf_conntrack_tuple_hash 150struct nf_conntrack_tuple_hash {
150{ 151 struct hlist_nulls_node hnnode;
151 struct hlist_node hnode;
152 struct nf_conntrack_tuple tuple; 152 struct nf_conntrack_tuple tuple;
153}; 153};
154 154
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 8a6150a3f4c7..eddb50289d6d 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -230,6 +230,7 @@ extern int nla_validate(struct nlattr *head, int len, int maxtype,
230extern int nla_parse(struct nlattr *tb[], int maxtype, 230extern int nla_parse(struct nlattr *tb[], int maxtype,
231 struct nlattr *head, int len, 231 struct nlattr *head, int len,
232 const struct nla_policy *policy); 232 const struct nla_policy *policy);
233extern int nla_policy_len(const struct nla_policy *, int);
233extern struct nlattr * nla_find(struct nlattr *head, int len, int attrtype); 234extern struct nlattr * nla_find(struct nlattr *head, int len, int attrtype);
234extern size_t nla_strlcpy(char *dst, const struct nlattr *nla, 235extern size_t nla_strlcpy(char *dst, const struct nlattr *nla,
235 size_t dstsize); 236 size_t dstsize);
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index f4498a62881b..9dc58402bc09 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -2,6 +2,7 @@
2#define __NETNS_CONNTRACK_H 2#define __NETNS_CONNTRACK_H
3 3
4#include <linux/list.h> 4#include <linux/list.h>
5#include <linux/list_nulls.h>
5#include <asm/atomic.h> 6#include <asm/atomic.h>
6 7
7struct ctl_table_header; 8struct ctl_table_header;
@@ -10,9 +11,9 @@ struct nf_conntrack_ecache;
10struct netns_ct { 11struct netns_ct {
11 atomic_t count; 12 atomic_t count;
12 unsigned int expect_count; 13 unsigned int expect_count;
13 struct hlist_head *hash; 14 struct hlist_nulls_head *hash;
14 struct hlist_head *expect_hash; 15 struct hlist_head *expect_hash;
15 struct hlist_head unconfirmed; 16 struct hlist_nulls_head unconfirmed;
16 struct ip_conntrack_stat *stat; 17 struct ip_conntrack_stat *stat;
17#ifdef CONFIG_NF_CONNTRACK_EVENTS 18#ifdef CONFIG_NF_CONNTRACK_EVENTS
18 struct nf_conntrack_ecache *ecache; 19 struct nf_conntrack_ecache *ecache;
diff --git a/include/scsi/fc/fc_fcoe.h b/include/scsi/fc/fc_fcoe.h
index f271d9cc0fc2..ccb3dbe90463 100644
--- a/include/scsi/fc/fc_fcoe.h
+++ b/include/scsi/fc/fc_fcoe.h
@@ -25,13 +25,6 @@
25 */ 25 */
26 26
27/* 27/*
28 * The FCoE ethertype eventually goes in net/if_ether.h.
29 */
30#ifndef ETH_P_FCOE
31#define ETH_P_FCOE 0x8906 /* FCOE ether type */
32#endif
33
34/*
35 * FC_FCOE_OUI hasn't been standardized yet. XXX TBD. 28 * FC_FCOE_OUI hasn't been standardized yet. XXX TBD.
36 */ 29 */
37#ifndef FC_FCOE_OUI 30#ifndef FC_FCOE_OUI
diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h
index 04d34a71355f..59511057cee0 100644
--- a/include/scsi/fc_frame.h
+++ b/include/scsi/fc_frame.h
@@ -54,8 +54,7 @@
54#define fr_eof(fp) (fr_cb(fp)->fr_eof) 54#define fr_eof(fp) (fr_cb(fp)->fr_eof)
55#define fr_flags(fp) (fr_cb(fp)->fr_flags) 55#define fr_flags(fp) (fr_cb(fp)->fr_flags)
56#define fr_max_payload(fp) (fr_cb(fp)->fr_max_payload) 56#define fr_max_payload(fp) (fr_cb(fp)->fr_max_payload)
57#define fr_cmd(fp) (fr_cb(fp)->fr_cmd) 57#define fr_fsp(fp) (fr_cb(fp)->fr_fsp)
58#define fr_dir(fp) (fr_cmd(fp)->sc_data_direction)
59#define fr_crc(fp) (fr_cb(fp)->fr_crc) 58#define fr_crc(fp) (fr_cb(fp)->fr_crc)
60 59
61struct fc_frame { 60struct fc_frame {
@@ -66,7 +65,7 @@ struct fcoe_rcv_info {
66 struct packet_type *ptype; 65 struct packet_type *ptype;
67 struct fc_lport *fr_dev; /* transport layer private pointer */ 66 struct fc_lport *fr_dev; /* transport layer private pointer */
68 struct fc_seq *fr_seq; /* for use with exchange manager */ 67 struct fc_seq *fr_seq; /* for use with exchange manager */
69 struct scsi_cmnd *fr_cmd; /* for use of scsi command */ 68 struct fc_fcp_pkt *fr_fsp; /* for the corresponding fcp I/O */
70 u32 fr_crc; 69 u32 fr_crc;
71 u16 fr_max_payload; /* max FC payload */ 70 u16 fr_max_payload; /* max FC payload */
72 enum fc_sof fr_sof; /* start of frame delimiter */ 71 enum fc_sof fr_sof; /* start of frame delimiter */
@@ -218,20 +217,6 @@ static inline bool fc_frame_is_cmd(const struct fc_frame *fp)
218 return fc_frame_rctl(fp) == FC_RCTL_DD_UNSOL_CMD; 217 return fc_frame_rctl(fp) == FC_RCTL_DD_UNSOL_CMD;
219} 218}
220 219
221static inline bool fc_frame_is_read(const struct fc_frame *fp)
222{
223 if (fc_frame_is_cmd(fp) && fr_cmd(fp))
224 return fr_dir(fp) == DMA_FROM_DEVICE;
225 return false;
226}
227
228static inline bool fc_frame_is_write(const struct fc_frame *fp)
229{
230 if (fc_frame_is_cmd(fp) && fr_cmd(fp))
231 return fr_dir(fp) == DMA_TO_DEVICE;
232 return false;
233}
234
235/* 220/*
236 * Check for leaks. 221 * Check for leaks.
237 * Print the frame header of any currently allocated frame, assuming there 222 * Print the frame header of any currently allocated frame, assuming there
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index a2e126b86e3e..a70eafaad084 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -245,6 +245,7 @@ struct fc_fcp_pkt {
245 */ 245 */
246 struct fcp_cmnd cdb_cmd; 246 struct fcp_cmnd cdb_cmd;
247 size_t xfer_len; 247 size_t xfer_len;
248 u16 xfer_ddp; /* this xfer is ddped */
248 u32 xfer_contig_end; /* offset of end of contiguous xfer */ 249 u32 xfer_contig_end; /* offset of end of contiguous xfer */
249 u16 max_payload; /* max payload size in bytes */ 250 u16 max_payload; /* max payload size in bytes */
250 251
@@ -267,6 +268,15 @@ struct fc_fcp_pkt {
267 u8 recov_retry; /* count of recovery retries */ 268 u8 recov_retry; /* count of recovery retries */
268 struct fc_seq *recov_seq; /* sequence for REC or SRR */ 269 struct fc_seq *recov_seq; /* sequence for REC or SRR */
269}; 270};
271/*
272 * FC_FCP HELPER FUNCTIONS
273 *****************************/
274static inline bool fc_fcp_is_read(const struct fc_fcp_pkt *fsp)
275{
276 if (fsp && fsp->cmd)
277 return fsp->cmd->sc_data_direction == DMA_FROM_DEVICE;
278 return false;
279}
270 280
271/* 281/*
272 * Structure and function definitions for managing Fibre Channel Exchanges 282 * Structure and function definitions for managing Fibre Channel Exchanges
@@ -400,6 +410,21 @@ struct libfc_function_template {
400 void *arg, unsigned int timer_msec); 410 void *arg, unsigned int timer_msec);
401 411
402 /* 412 /*
413 * Sets up the DDP context for a given exchange id on the given
414 * scatterlist if LLD supports DDP for large receive.
415 *
416 * STATUS: OPTIONAL
417 */
418 int (*ddp_setup)(struct fc_lport *lp, u16 xid,
419 struct scatterlist *sgl, unsigned int sgc);
420 /*
421 * Completes the DDP transfer and returns the length of data DDPed
422 * for the given exchange id.
423 *
424 * STATUS: OPTIONAL
425 */
426 int (*ddp_done)(struct fc_lport *lp, u16 xid);
427 /*
403 * Send a frame using an existing sequence and exchange. 428 * Send a frame using an existing sequence and exchange.
404 * 429 *
405 * STATUS: OPTIONAL 430 * STATUS: OPTIONAL
@@ -654,6 +679,7 @@ struct fc_lport {
654 u16 link_speed; 679 u16 link_speed;
655 u16 link_supported_speeds; 680 u16 link_supported_speeds;
656 u16 lro_xid; /* max xid for fcoe lro */ 681 u16 lro_xid; /* max xid for fcoe lro */
682 unsigned int lso_max; /* max large send size */
657 struct fc_ns_fts fcts; /* FC-4 type masks */ 683 struct fc_ns_fts fcts; /* FC-4 type masks */
658 struct fc_els_rnid_gen rnid_gen; /* RNID information */ 684 struct fc_els_rnid_gen rnid_gen; /* RNID information */
659 685
@@ -821,6 +847,11 @@ int fc_change_queue_type(struct scsi_device *sdev, int tag_type);
821void fc_fcp_destroy(struct fc_lport *); 847void fc_fcp_destroy(struct fc_lport *);
822 848
823/* 849/*
850 * Set up direct-data placement for this I/O request
851 */
852void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
853
854/*
824 * ELS/CT interface 855 * ELS/CT interface
825 *****************************/ 856 *****************************/
826/* 857/*
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 941818f29f59..c41f7d0c6efc 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -124,24 +124,6 @@ static inline u16 skb_fc_rxid(const struct sk_buff *skb)
124 return be16_to_cpu(skb_fc_header(skb)->fh_rx_id); 124 return be16_to_cpu(skb_fc_header(skb)->fh_rx_id);
125} 125}
126 126
127/* FIXME - DMA_BIDIRECTIONAL ? */
128#define skb_cb(skb) ((struct fcoe_rcv_info *)&((skb)->cb[0]))
129#define skb_cmd(skb) (skb_cb(skb)->fr_cmd)
130#define skb_dir(skb) (skb_cmd(skb)->sc_data_direction)
131static inline bool skb_fc_is_read(const struct sk_buff *skb)
132{
133 if (skb_fc_is_cmd(skb) && skb_cmd(skb))
134 return skb_dir(skb) == DMA_FROM_DEVICE;
135 return false;
136}
137
138static inline bool skb_fc_is_write(const struct sk_buff *skb)
139{
140 if (skb_fc_is_cmd(skb) && skb_cmd(skb))
141 return skb_dir(skb) == DMA_TO_DEVICE;
142 return false;
143}
144
145/* libfcoe funcs */ 127/* libfcoe funcs */
146int fcoe_reset(struct Scsi_Host *shost); 128int fcoe_reset(struct Scsi_Host *shost);
147u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], 129u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 7360e1916e75..7ffaed2f94dd 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -45,18 +45,10 @@ struct iscsi_session;
45struct iscsi_nopin; 45struct iscsi_nopin;
46struct device; 46struct device;
47 47
48/* #define DEBUG_SCSI */
49#ifdef DEBUG_SCSI
50#define debug_scsi(fmt...) printk(KERN_INFO "iscsi: " fmt)
51#else
52#define debug_scsi(fmt...)
53#endif
54
55#define ISCSI_DEF_XMIT_CMDS_MAX 128 /* must be power of 2 */ 48#define ISCSI_DEF_XMIT_CMDS_MAX 128 /* must be power of 2 */
56#define ISCSI_MGMT_CMDS_MAX 15 49#define ISCSI_MGMT_CMDS_MAX 15
57 50
58#define ISCSI_DEF_CMD_PER_LUN 32 51#define ISCSI_DEF_CMD_PER_LUN 32
59#define ISCSI_MAX_CMD_PER_LUN 128
60 52
61/* Task Mgmt states */ 53/* Task Mgmt states */
62enum { 54enum {
@@ -326,6 +318,9 @@ struct iscsi_host {
326 spinlock_t lock; 318 spinlock_t lock;
327 int num_sessions; 319 int num_sessions;
328 int state; 320 int state;
321
322 struct workqueue_struct *workq;
323 char workq_name[20];
329}; 324};
330 325
331/* 326/*
@@ -351,7 +346,8 @@ extern int iscsi_host_get_param(struct Scsi_Host *shost,
351 enum iscsi_host_param param, char *buf); 346 enum iscsi_host_param param, char *buf);
352extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev); 347extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
353extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht, 348extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
354 int dd_data_size, uint16_t qdepth); 349 int dd_data_size,
350 bool xmit_can_sleep);
355extern void iscsi_host_remove(struct Scsi_Host *shost); 351extern void iscsi_host_remove(struct Scsi_Host *shost);
356extern void iscsi_host_free(struct Scsi_Host *shost); 352extern void iscsi_host_free(struct Scsi_Host *shost);
357 353
@@ -382,11 +378,12 @@ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
382extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *, 378extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
383 int); 379 int);
384extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err); 380extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
385extern void iscsi_session_failure(struct iscsi_cls_session *cls_session, 381extern void iscsi_session_failure(struct iscsi_session *session,
386 enum iscsi_err err); 382 enum iscsi_err err);
387extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, 383extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
388 enum iscsi_param param, char *buf); 384 enum iscsi_param param, char *buf);
389extern void iscsi_suspend_tx(struct iscsi_conn *conn); 385extern void iscsi_suspend_tx(struct iscsi_conn *conn);
386extern void iscsi_conn_queue_work(struct iscsi_conn *conn);
390 387
391#define iscsi_conn_printk(prefix, _c, fmt, a...) \ 388#define iscsi_conn_printk(prefix, _c, fmt, a...) \
392 iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \ 389 iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
diff --git a/include/scsi/osd_attributes.h b/include/scsi/osd_attributes.h
new file mode 100644
index 000000000000..f888a6fda073
--- /dev/null
+++ b/include/scsi/osd_attributes.h
@@ -0,0 +1,327 @@
1#ifndef __OSD_ATTRIBUTES_H__
2#define __OSD_ATTRIBUTES_H__
3
4#include "osd_protocol.h"
5
6/*
7 * Contains types and constants that define attribute pages and attribute
8 * numbers and their data types.
9 */
10
11#define ATTR_SET(pg, id, l, ptr) \
12 { .attr_page = pg, .attr_id = id, .len = l, .val_ptr = ptr }
13
14#define ATTR_DEF(pg, id, l) ATTR_SET(pg, id, l, NULL)
15
16/* osd-r10 4.7.3 Attributes pages */
17enum {
18 OSD_APAGE_OBJECT_FIRST = 0x0,
19 OSD_APAGE_OBJECT_DIRECTORY = 0,
20 OSD_APAGE_OBJECT_INFORMATION = 1,
21 OSD_APAGE_OBJECT_QUOTAS = 2,
22 OSD_APAGE_OBJECT_TIMESTAMP = 3,
23 OSD_APAGE_OBJECT_COLLECTIONS = 4,
24 OSD_APAGE_OBJECT_SECURITY = 5,
25 OSD_APAGE_OBJECT_LAST = 0x2fffffff,
26
27 OSD_APAGE_PARTITION_FIRST = 0x30000000,
28 OSD_APAGE_PARTITION_DIRECTORY = OSD_APAGE_PARTITION_FIRST + 0,
29 OSD_APAGE_PARTITION_INFORMATION = OSD_APAGE_PARTITION_FIRST + 1,
30 OSD_APAGE_PARTITION_QUOTAS = OSD_APAGE_PARTITION_FIRST + 2,
31 OSD_APAGE_PARTITION_TIMESTAMP = OSD_APAGE_PARTITION_FIRST + 3,
32 OSD_APAGE_PARTITION_SECURITY = OSD_APAGE_PARTITION_FIRST + 5,
33 OSD_APAGE_PARTITION_LAST = 0x5FFFFFFF,
34
35 OSD_APAGE_COLLECTION_FIRST = 0x60000000,
36 OSD_APAGE_COLLECTION_DIRECTORY = OSD_APAGE_COLLECTION_FIRST + 0,
37 OSD_APAGE_COLLECTION_INFORMATION = OSD_APAGE_COLLECTION_FIRST + 1,
38 OSD_APAGE_COLLECTION_TIMESTAMP = OSD_APAGE_COLLECTION_FIRST + 3,
39 OSD_APAGE_COLLECTION_SECURITY = OSD_APAGE_COLLECTION_FIRST + 5,
40 OSD_APAGE_COLLECTION_LAST = 0x8FFFFFFF,
41
42 OSD_APAGE_ROOT_FIRST = 0x90000000,
43 OSD_APAGE_ROOT_DIRECTORY = OSD_APAGE_ROOT_FIRST + 0,
44 OSD_APAGE_ROOT_INFORMATION = OSD_APAGE_ROOT_FIRST + 1,
45 OSD_APAGE_ROOT_QUOTAS = OSD_APAGE_ROOT_FIRST + 2,
46 OSD_APAGE_ROOT_TIMESTAMP = OSD_APAGE_ROOT_FIRST + 3,
47 OSD_APAGE_ROOT_SECURITY = OSD_APAGE_ROOT_FIRST + 5,
48 OSD_APAGE_ROOT_LAST = 0xBFFFFFFF,
49
50 OSD_APAGE_RESERVED_TYPE_FIRST = 0xC0000000,
51 OSD_APAGE_RESERVED_TYPE_LAST = 0xEFFFFFFF,
52
53 OSD_APAGE_COMMON_FIRST = 0xF0000000,
54 OSD_APAGE_COMMON_LAST = 0xFFFFFFFE,
55
56 OSD_APAGE_REQUEST_ALL = 0xFFFFFFFF,
57};
58
59/* subcategories of attr pages within each range above */
60enum {
61 OSD_APAGE_STD_FIRST = 0x0,
62 OSD_APAGE_STD_DIRECTORY = 0,
63 OSD_APAGE_STD_INFORMATION = 1,
64 OSD_APAGE_STD_QUOTAS = 2,
65 OSD_APAGE_STD_TIMESTAMP = 3,
66 OSD_APAGE_STD_COLLECTIONS = 4,
67 OSD_APAGE_STD_POLICY_SECURITY = 5,
68 OSD_APAGE_STD_LAST = 0x0000007F,
69
70 OSD_APAGE_RESERVED_FIRST = 0x00000080,
71 OSD_APAGE_RESERVED_LAST = 0x00007FFF,
72
73 OSD_APAGE_OTHER_STD_FIRST = 0x00008000,
74 OSD_APAGE_OTHER_STD_LAST = 0x0000EFFF,
75
76 OSD_APAGE_PUBLIC_FIRST = 0x0000F000,
77 OSD_APAGE_PUBLIC_LAST = 0x0000FFFF,
78
79 OSD_APAGE_APP_DEFINED_FIRST = 0x00010000,
80 OSD_APAGE_APP_DEFINED_LAST = 0x1FFFFFFF,
81
82 OSD_APAGE_VENDOR_SPECIFIC_FIRST = 0x20000000,
83 OSD_APAGE_VENDOR_SPECIFIC_LAST = 0x2FFFFFFF,
84};
85
86enum {
87 OSD_ATTR_PAGE_IDENTIFICATION = 0, /* in all pages 40 bytes */
88};
89
90struct page_identification {
91 u8 vendor_identification[8];
92 u8 page_identification[32];
93} __packed;
94
95struct osd_attr_page_header {
96 __be32 page_number;
97 __be32 page_length;
98} __packed;
99
100/* 7.1.2.8 Root Information attributes page (OSD_APAGE_ROOT_INFORMATION) */
101enum {
102 OSD_ATTR_RI_OSD_SYSTEM_ID = 0x3, /* 20 */
103 OSD_ATTR_RI_VENDOR_IDENTIFICATION = 0x4, /* 8 */
104 OSD_ATTR_RI_PRODUCT_IDENTIFICATION = 0x5, /* 16 */
105 OSD_ATTR_RI_PRODUCT_MODEL = 0x6, /* 32 */
106 OSD_ATTR_RI_PRODUCT_REVISION_LEVEL = 0x7, /* 4 */
107 OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER = 0x8, /* variable */
108 OSD_ATTR_RI_OSD_NAME = 0x9, /* variable */
109 OSD_ATTR_RI_TOTAL_CAPACITY = 0x80, /* 8 */
110 OSD_ATTR_RI_USED_CAPACITY = 0x81, /* 8 */
111 OSD_ATTR_RI_NUMBER_OF_PARTITIONS = 0xC0, /* 8 */
112 OSD_ATTR_RI_CLOCK = 0x100, /* 6 */
113};
114/* Root_Information_attributes_page does not have a get_page structure */
115
116/* 7.1.2.9 Partition Information attributes page
117 * (OSD_APAGE_PARTITION_INFORMATION)
118 */
119enum {
120 OSD_ATTR_PI_PARTITION_ID = 0x1, /* 8 */
121 OSD_ATTR_PI_USERNAME = 0x9, /* variable */
122 OSD_ATTR_PI_USED_CAPACITY = 0x81, /* 8 */
123 OSD_ATTR_PI_NUMBER_OF_OBJECTS = 0xC1, /* 8 */
124};
125/* Partition Information attributes page does not have a get_page structure */
126
127/* 7.1.2.10 Collection Information attributes page
128 * (OSD_APAGE_COLLECTION_INFORMATION)
129 */
130enum {
131 OSD_ATTR_CI_PARTITION_ID = 0x1, /* 8 */
132 OSD_ATTR_CI_COLLECTION_OBJECT_ID = 0x2, /* 8 */
133 OSD_ATTR_CI_USERNAME = 0x9, /* variable */
134 OSD_ATTR_CI_USED_CAPACITY = 0x81, /* 8 */
135};
136/* Collection Information attributes page does not have a get_page structure */
137
138/* 7.1.2.11 User Object Information attributes page
139 * (OSD_APAGE_OBJECT_INFORMATION)
140 */
141enum {
142 OSD_ATTR_OI_PARTITION_ID = 0x1, /* 8 */
143 OSD_ATTR_OI_OBJECT_ID = 0x2, /* 8 */
144 OSD_ATTR_OI_USERNAME = 0x9, /* variable */
145 OSD_ATTR_OI_USED_CAPACITY = 0x81, /* 8 */
146 OSD_ATTR_OI_LOGICAL_LENGTH = 0x82, /* 8 */
147};
148/* Object Information attributes page does not have a get_page structure */
149
150/* 7.1.2.12 Root Quotas attributes page (OSD_APAGE_ROOT_QUOTAS) */
151enum {
152 OSD_ATTR_RQ_DEFAULT_MAXIMUM_USER_OBJECT_LENGTH = 0x1, /* 8 */
153 OSD_ATTR_RQ_PARTITION_CAPACITY_QUOTA = 0x10001, /* 8 */
154 OSD_ATTR_RQ_PARTITION_OBJECT_COUNT = 0x10002, /* 8 */
155 OSD_ATTR_RQ_PARTITION_COLLECTIONS_PER_USER_OBJECT = 0x10081, /* 4 */
156 OSD_ATTR_RQ_PARTITION_COUNT = 0x20002, /* 8 */
157};
158
159struct Root_Quotas_attributes_page {
160 struct osd_attr_page_header hdr; /* id=R+2, size=0x24 */
161 __be64 default_maximum_user_object_length;
162 __be64 partition_capacity_quota;
163 __be64 partition_object_count;
164 __be64 partition_collections_per_user_object;
165 __be64 partition_count;
166} __packed;
167
168/* 7.1.2.13 Partition Quotas attributes page (OSD_APAGE_PARTITION_QUOTAS)*/
169enum {
170 OSD_ATTR_PQ_DEFAULT_MAXIMUM_USER_OBJECT_LENGTH = 0x1, /* 8 */
171 OSD_ATTR_PQ_CAPACITY_QUOTA = 0x10001, /* 8 */
172 OSD_ATTR_PQ_OBJECT_COUNT = 0x10002, /* 8 */
173 OSD_ATTR_PQ_COLLECTIONS_PER_USER_OBJECT = 0x10081, /* 4 */
174};
175
176struct Partition_Quotas_attributes_page {
177 struct osd_attr_page_header hdr; /* id=P+2, size=0x1C */
178 __be64 default_maximum_user_object_length;
179 __be64 capacity_quota;
180 __be64 object_count;
181 __be64 collections_per_user_object;
182} __packed;
183
184/* 7.1.2.14 User Object Quotas attributes page (OSD_APAGE_OBJECT_QUOTAS) */
185enum {
186 OSD_ATTR_OQ_MAXIMUM_LENGTH = 0x1, /* 8 */
187};
188
189struct Object_Quotas_attributes_page {
190 struct osd_attr_page_header hdr; /* id=U+2, size=0x8 */
191 __be64 maximum_length;
192} __packed;
193
194/* 7.1.2.15 Root Timestamps attributes page (OSD_APAGE_ROOT_TIMESTAMP) */
195enum {
196 OSD_ATTR_RT_ATTRIBUTES_ACCESSED_TIME = 0x2, /* 6 */
197 OSD_ATTR_RT_ATTRIBUTES_MODIFIED_TIME = 0x3, /* 6 */
198 OSD_ATTR_RT_TIMESTAMP_BYPASS = 0xFFFFFFFE, /* 1 */
199};
200
201struct root_timestamps_attributes_page {
202 struct osd_attr_page_header hdr; /* id=R+3, size=0xD */
203 struct osd_timestamp attributes_accessed_time;
204 struct osd_timestamp attributes_modified_time;
205 u8 timestamp_bypass;
206} __packed;
207
208/* 7.1.2.16 Partition Timestamps attributes page
209 * (OSD_APAGE_PARTITION_TIMESTAMP)
210 */
211enum {
212 OSD_ATTR_PT_CREATED_TIME = 0x1, /* 6 */
213 OSD_ATTR_PT_ATTRIBUTES_ACCESSED_TIME = 0x2, /* 6 */
214 OSD_ATTR_PT_ATTRIBUTES_MODIFIED_TIME = 0x3, /* 6 */
215 OSD_ATTR_PT_DATA_ACCESSED_TIME = 0x4, /* 6 */
216 OSD_ATTR_PT_DATA_MODIFIED_TIME = 0x5, /* 6 */
217 OSD_ATTR_PT_TIMESTAMP_BYPASS = 0xFFFFFFFE, /* 1 */
218};
219
220struct partition_timestamps_attributes_page {
221 struct osd_attr_page_header hdr; /* id=P+3, size=0x1F */
222 struct osd_timestamp created_time;
223 struct osd_timestamp attributes_accessed_time;
224 struct osd_timestamp attributes_modified_time;
225 struct osd_timestamp data_accessed_time;
226 struct osd_timestamp data_modified_time;
227 u8 timestamp_bypass;
228} __packed;
229
230/* 7.1.2.17/18 Collection/Object Timestamps attributes page
231 * (OSD_APAGE_COLLECTION_TIMESTAMP/OSD_APAGE_OBJECT_TIMESTAMP)
232 */
233enum {
234 OSD_ATTR_OT_CREATED_TIME = 0x1, /* 6 */
235 OSD_ATTR_OT_ATTRIBUTES_ACCESSED_TIME = 0x2, /* 6 */
236 OSD_ATTR_OT_ATTRIBUTES_MODIFIED_TIME = 0x3, /* 6 */
237 OSD_ATTR_OT_DATA_ACCESSED_TIME = 0x4, /* 6 */
238 OSD_ATTR_OT_DATA_MODIFIED_TIME = 0x5, /* 6 */
239};
240
241/* same for collection */
242struct object_timestamps_attributes_page {
243 struct osd_attr_page_header hdr; /* id=C+3/3, size=0x1E */
244 struct osd_timestamp created_time;
245 struct osd_timestamp attributes_accessed_time;
246 struct osd_timestamp attributes_modified_time;
247 struct osd_timestamp data_accessed_time;
248 struct osd_timestamp data_modified_time;
249} __packed;
250
251/* 7.1.2.19 Collections attributes page */
252/* TBD */
253
254/* 7.1.2.20 Root Policy/Security attributes page (OSD_APAGE_ROOT_SECURITY) */
255enum {
256 OSD_ATTR_RS_DEFAULT_SECURITY_METHOD = 0x1, /* 1 */
257 OSD_ATTR_RS_OLDEST_VALID_NONCE_LIMIT = 0x2, /* 6 */
258 OSD_ATTR_RS_NEWEST_VALID_NONCE_LIMIT = 0x3, /* 6 */
259 OSD_ATTR_RS_PARTITION_DEFAULT_SECURITY_METHOD = 0x6, /* 1 */
260 OSD_ATTR_RS_SUPPORTED_SECURITY_METHODS = 0x7, /* 2 */
261 OSD_ATTR_RS_ADJUSTABLE_CLOCK = 0x9, /* 6 */
262 OSD_ATTR_RS_MASTER_KEY_IDENTIFIER = 0x7FFD, /* 0 or 7 */
263 OSD_ATTR_RS_ROOT_KEY_IDENTIFIER = 0x7FFE, /* 0 or 7 */
264 OSD_ATTR_RS_SUPPORTED_INTEGRITY_ALGORITHM_0 = 0x80000000,/* 1,(x16)*/
265 OSD_ATTR_RS_SUPPORTED_DH_GROUP_0 = 0x80000010,/* 1,(x16)*/
266};
267
268struct root_security_attributes_page {
269 struct osd_attr_page_header hdr; /* id=R+5, size=0x3F */
270 u8 default_security_method;
271 u8 partition_default_security_method;
272 __be16 supported_security_methods;
273 u8 mki_valid_rki_valid;
274 struct osd_timestamp oldest_valid_nonce_limit;
275 struct osd_timestamp newest_valid_nonce_limit;
276 struct osd_timestamp adjustable_clock;
277 u8 master_key_identifier[32-25];
278 u8 root_key_identifier[39-32];
279 u8 supported_integrity_algorithm[16];
280 u8 supported_dh_group[16];
281} __packed;
282
283/* 7.1.2.21 Partition Policy/Security attributes page
284 * (OSD_APAGE_PARTITION_SECURITY)
285 */
286enum {
287 OSD_ATTR_PS_DEFAULT_SECURITY_METHOD = 0x1, /* 1 */
288 OSD_ATTR_PS_OLDEST_VALID_NONCE = 0x2, /* 6 */
289 OSD_ATTR_PS_NEWEST_VALID_NONCE = 0x3, /* 6 */
290 OSD_ATTR_PS_REQUEST_NONCE_LIST_DEPTH = 0x4, /* 2 */
291 OSD_ATTR_PS_FROZEN_WORKING_KEY_BIT_MASK = 0x5, /* 2 */
292 OSD_ATTR_PS_PARTITION_KEY_IDENTIFIER = 0x7FFF, /* 0 or 7 */
293 OSD_ATTR_PS_WORKING_KEY_IDENTIFIER_FIRST = 0x8000, /* 0 or 7 */
294 OSD_ATTR_PS_WORKING_KEY_IDENTIFIER_LAST = 0x800F, /* 0 or 7 */
295 OSD_ATTR_PS_POLICY_ACCESS_TAG = 0x40000001, /* 4 */
296 OSD_ATTR_PS_USER_OBJECT_POLICY_ACCESS_TAG = 0x40000002, /* 4 */
297};
298
299struct partition_security_attributes_page {
300 struct osd_attr_page_header hdr; /* id=p+5, size=0x8f */
301 u8 reserved[3];
302 u8 default_security_method;
303 struct osd_timestamp oldest_valid_nonce;
304 struct osd_timestamp newest_valid_nonce;
305 __be16 request_nonce_list_depth;
306 __be16 frozen_working_key_bit_mask;
307 __be32 policy_access_tag;
308 __be32 user_object_policy_access_tag;
309 u8 pki_valid;
310 __be16 wki_00_0f_vld;
311 struct osd_key_identifier partition_key_identifier;
312 struct osd_key_identifier working_key_identifiers[16];
313} __packed;
314
315/* 7.1.2.22/23 Collection/Object Policy-Security attributes page
316 * (OSD_APAGE_COLLECTION_SECURITY/OSD_APAGE_OBJECT_SECURITY)
317 */
318enum {
319 OSD_ATTR_OS_POLICY_ACCESS_TAG = 0x40000001, /* 4 */
320};
321
322struct object_security_attributes_page {
323 struct osd_attr_page_header hdr; /* id=C+5/5, size=4 */
324 __be32 policy_access_tag;
325} __packed;
326
327#endif /*ndef __OSD_ATTRIBUTES_H__*/
diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h
new file mode 100644
index 000000000000..b24d9616eb46
--- /dev/null
+++ b/include/scsi/osd_initiator.h
@@ -0,0 +1,433 @@
1/*
2 * osd_initiator.h - OSD initiator API definition
3 *
4 * Copyright (C) 2008 Panasas Inc. All rights reserved.
5 *
6 * Authors:
7 * Boaz Harrosh <bharrosh@panasas.com>
8 * Benny Halevy <bhalevy@panasas.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 *
13 */
14#ifndef __OSD_INITIATOR_H__
15#define __OSD_INITIATOR_H__
16
17#include "osd_protocol.h"
18#include "osd_types.h"
19
20#include <linux/blkdev.h>
21
22/* Note: "NI" in comments below means "Not Implemented yet" */
23
24/* Configure of code:
25 * #undef if you *don't* want OSD v1 support in runtime.
26 * If #defined the initiator will dynamically configure to encode OSD v1
27 * CDB's if the target is detected to be OSD v1 only.
28 * OSD v2 only commands, options, and attributes will be ignored if target
29 * is v1 only.
30 * If #defined will result in bigger/slower code (OK Slower maybe not)
31 * Q: Should this be CONFIG_SCSI_OSD_VER1_SUPPORT and set from Kconfig?
32 */
33#define OSD_VER1_SUPPORT y
34
35enum osd_std_version {
36 OSD_VER_NONE = 0,
37 OSD_VER1 = 1,
38 OSD_VER2 = 2,
39};
40
41/*
42 * Object-based Storage Device.
43 * This object represents an OSD device.
44 * It is not a full linux device in any way. It is only
45 * a place to hang resources associated with a Linux
46 * request Q and some default properties.
47 */
48struct osd_dev {
49 struct scsi_device *scsi_device;
50 unsigned def_timeout;
51
52#ifdef OSD_VER1_SUPPORT
53 enum osd_std_version version;
54#endif
55};
56
57/* Retrieve/return osd_dev(s) for use by Kernel clients */
58struct osd_dev *osduld_path_lookup(const char *dev_name); /*Use IS_ERR/ERR_PTR*/
59void osduld_put_device(struct osd_dev *od);
60
61/* Add/remove test ioctls from external modules */
62typedef int (do_test_fn)(struct osd_dev *od, unsigned cmd, unsigned long arg);
63int osduld_register_test(unsigned ioctl, do_test_fn *do_test);
64void osduld_unregister_test(unsigned ioctl);
65
66/* These are called by uld at probe time */
67void osd_dev_init(struct osd_dev *od, struct scsi_device *scsi_device);
68void osd_dev_fini(struct osd_dev *od);
69
70/* some hi level device operations */
71int osd_auto_detect_ver(struct osd_dev *od, void *caps); /* GFP_KERNEL */
72
73/* we might want to use function vector in the future */
74static inline void osd_dev_set_ver(struct osd_dev *od, enum osd_std_version v)
75{
76#ifdef OSD_VER1_SUPPORT
77 od->version = v;
78#endif
79}
80
81struct osd_request;
82typedef void (osd_req_done_fn)(struct osd_request *or, void *private);
83
84struct osd_request {
85 struct osd_cdb cdb;
86 struct osd_data_out_integrity_info out_data_integ;
87 struct osd_data_in_integrity_info in_data_integ;
88
89 struct osd_dev *osd_dev;
90 struct request *request;
91
92 struct _osd_req_data_segment {
93 void *buff;
94 unsigned alloc_size; /* 0 here means: don't call kfree */
95 unsigned total_bytes;
96 } set_attr, enc_get_attr, get_attr;
97
98 struct _osd_io_info {
99 struct bio *bio;
100 u64 total_bytes;
101 struct request *req;
102 struct _osd_req_data_segment *last_seg;
103 u8 *pad_buff;
104 } out, in;
105
106 gfp_t alloc_flags;
107 unsigned timeout;
108 unsigned retries;
109 u8 sense[OSD_MAX_SENSE_LEN];
110 enum osd_attributes_mode attributes_mode;
111
112 osd_req_done_fn *async_done;
113 void *async_private;
114 int async_error;
115};
116
117/* OSD Version control */
118static inline bool osd_req_is_ver1(struct osd_request *or)
119{
120#ifdef OSD_VER1_SUPPORT
121 return or->osd_dev->version == OSD_VER1;
122#else
123 return false;
124#endif
125}
126
127/*
128 * How to use the osd library:
129 *
130 * osd_start_request
131 * Allocates a request.
132 *
133 * osd_req_*
134 * Call one of, to encode the desired operation.
135 *
136 * osd_add_{get,set}_attr
137 * Optionally add attributes to the CDB, list or page mode.
138 *
139 * osd_finalize_request
140 * Computes final data out/in offsets and signs the request,
141 * making it ready for execution.
142 *
143 * osd_execute_request
144 * May be called to execute it through the block layer. Other wise submit
145 * the associated block request in some other way.
146 *
147 * After execution:
148 * osd_req_decode_sense
149 * Decodes sense information to verify execution results.
150 *
151 * osd_req_decode_get_attr
152 * Retrieve osd_add_get_attr_list() values if used.
153 *
154 * osd_end_request
155 * Must be called to deallocate the request.
156 */
157
158/**
159 * osd_start_request - Allocate and initialize an osd_request
160 *
161 * @osd_dev: OSD device that holds the scsi-device and default values
162 * that the request is associated with.
163 * @gfp: The allocation flags to use for request allocation, and all
164 * subsequent allocations. This will be stored at
165 * osd_request->alloc_flags, can be changed by user later
166 *
167 * Allocate osd_request and initialize all members to the
168 * default/initial state.
169 */
170struct osd_request *osd_start_request(struct osd_dev *od, gfp_t gfp);
171
172enum osd_req_options {
173 OSD_REQ_FUA = 0x08, /* Force Unit Access */
174 OSD_REQ_DPO = 0x10, /* Disable Page Out */
175
176 OSD_REQ_BYPASS_TIMESTAMPS = 0x80,
177};
178
179/**
180 * osd_finalize_request - Sign request and prepare request for execution
181 *
182 * @or: osd_request to prepare
183 * @options: combination of osd_req_options bit flags or 0.
184 * @cap: A Pointer to an OSD_CAP_LEN bytes buffer that is received from
185 * The security manager as capabilities for this cdb.
186 * @cap_key: The cryptographic key used to sign the cdb/data. Can be null
187 * if NOSEC is used.
188 *
189 * The actual request and bios are only allocated here, so are the get_attr
190 * buffers that will receive the returned attributes. Copy's @cap to cdb.
191 * Sign the cdb/data with @cap_key.
192 */
193int osd_finalize_request(struct osd_request *or,
194 u8 options, const void *cap, const u8 *cap_key);
195
196/**
197 * osd_execute_request - Execute the request synchronously through block-layer
198 *
199 * @or: osd_request to Executed
200 *
201 * Calls blk_execute_rq to q the command and waits for completion.
202 */
203int osd_execute_request(struct osd_request *or);
204
205/**
206 * osd_execute_request_async - Execute the request without waitting.
207 *
208 * @or: - osd_request to Executed
209 * @done: (Optional) - Called at end of execution
210 * @private: - Will be passed to @done function
211 *
212 * Calls blk_execute_rq_nowait to queue the command. When execution is done
213 * optionally calls @done with @private as parameter. @or->async_error will
214 * have the return code
215 */
216int osd_execute_request_async(struct osd_request *or,
217 osd_req_done_fn *done, void *private);
218
219/**
220 * osd_req_decode_sense_full - Decode sense information after execution.
221 *
222 * @or: - osd_request to examine
223 * @osi - Recievs a more detailed error report information (optional).
224 * @silent - Do not print to dmsg (Even if enabled)
225 * @bad_obj_list - Some commands act on multiple objects. Failed objects will
226 * be recieved here (optional)
227 * @max_obj - Size of @bad_obj_list.
228 * @bad_attr_list - List of failing attributes (optional)
229 * @max_attr - Size of @bad_attr_list.
230 *
231 * After execution, sense + return code can be analyzed using this function. The
232 * return code is the final disposition on the error. So it is possible that a
233 * CHECK_CONDITION was returned from target but this will return NO_ERROR, for
234 * example on recovered errors. All parameters are optional if caller does
235 * not need any returned information.
236 * Note: This function will also dump the error to dmsg according to settings
237 * of the SCSI_OSD_DPRINT_SENSE Kconfig value. Set @silent if you know the
238 * command would routinely fail, to not spam the dmsg file.
239 */
240struct osd_sense_info {
241 int key; /* one of enum scsi_sense_keys */
242 int additional_code ; /* enum osd_additional_sense_codes */
243 union { /* Sense specific information */
244 u16 sense_info;
245 u16 cdb_field_offset; /* scsi_invalid_field_in_cdb */
246 };
247 union { /* Command specific information */
248 u64 command_info;
249 };
250
251 u32 not_initiated_command_functions; /* osd_command_functions_bits */
252 u32 completed_command_functions; /* osd_command_functions_bits */
253 struct osd_obj_id obj;
254 struct osd_attr attr;
255};
256
257int osd_req_decode_sense_full(struct osd_request *or,
258 struct osd_sense_info *osi, bool silent,
259 struct osd_obj_id *bad_obj_list, int max_obj,
260 struct osd_attr *bad_attr_list, int max_attr);
261
262static inline int osd_req_decode_sense(struct osd_request *or,
263 struct osd_sense_info *osi)
264{
265 return osd_req_decode_sense_full(or, osi, false, NULL, 0, NULL, 0);
266}
267
268/**
269 * osd_end_request - return osd_request to free store
270 *
271 * @or: osd_request to free
272 *
273 * Deallocate all osd_request resources (struct req's, BIOs, buffers, etc.)
274 */
275void osd_end_request(struct osd_request *or);
276
277/*
278 * CDB Encoding
279 *
280 * Note: call only one of the following methods.
281 */
282
283/*
284 * Device commands
285 */
286void osd_req_set_master_seed_xchg(struct osd_request *or, ...);/* NI */
287void osd_req_set_master_key(struct osd_request *or, ...);/* NI */
288
289void osd_req_format(struct osd_request *or, u64 tot_capacity);
290
291/* list all partitions
292 * @list header must be initialized to zero on first run.
293 *
294 * Call osd_is_obj_list_done() to find if we got the complete list.
295 */
296int osd_req_list_dev_partitions(struct osd_request *or,
297 osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem);
298
299void osd_req_flush_obsd(struct osd_request *or,
300 enum osd_options_flush_scope_values);
301
302void osd_req_perform_scsi_command(struct osd_request *or,
303 const u8 *cdb, ...);/* NI */
304void osd_req_task_management(struct osd_request *or, ...);/* NI */
305
306/*
307 * Partition commands
308 */
309void osd_req_create_partition(struct osd_request *or, osd_id partition);
310void osd_req_remove_partition(struct osd_request *or, osd_id partition);
311
312void osd_req_set_partition_key(struct osd_request *or,
313 osd_id partition, u8 new_key_id[OSD_CRYPTO_KEYID_SIZE],
314 u8 seed[OSD_CRYPTO_SEED_SIZE]);/* NI */
315
316/* list all collections in the partition
317 * @list header must be init to zero on first run.
318 *
319 * Call osd_is_obj_list_done() to find if we got the complete list.
320 */
321int osd_req_list_partition_collections(struct osd_request *or,
322 osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
323 unsigned nelem);
324
325/* list all objects in the partition
326 * @list header must be init to zero on first run.
327 *
328 * Call osd_is_obj_list_done() to find if we got the complete list.
329 */
330int osd_req_list_partition_objects(struct osd_request *or,
331 osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
332 unsigned nelem);
333
334void osd_req_flush_partition(struct osd_request *or,
335 osd_id partition, enum osd_options_flush_scope_values);
336
337/*
338 * Collection commands
339 */
340void osd_req_create_collection(struct osd_request *or,
341 const struct osd_obj_id *);/* NI */
342void osd_req_remove_collection(struct osd_request *or,
343 const struct osd_obj_id *);/* NI */
344
345/* list all objects in the collection */
346int osd_req_list_collection_objects(struct osd_request *or,
347 const struct osd_obj_id *, osd_id initial_id,
348 struct osd_obj_id_list *list, unsigned nelem);
349
350/* V2 only filtered list of objects in the collection */
351void osd_req_query(struct osd_request *or, ...);/* NI */
352
353void osd_req_flush_collection(struct osd_request *or,
354 const struct osd_obj_id *, enum osd_options_flush_scope_values);
355
356void osd_req_get_member_attrs(struct osd_request *or, ...);/* V2-only NI */
357void osd_req_set_member_attrs(struct osd_request *or, ...);/* V2-only NI */
358
359/*
360 * Object commands
361 */
362void osd_req_create_object(struct osd_request *or, struct osd_obj_id *);
363void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *);
364
365void osd_req_write(struct osd_request *or,
366 const struct osd_obj_id *, struct bio *data_out, u64 offset);
367void osd_req_append(struct osd_request *or,
368 const struct osd_obj_id *, struct bio *data_out);/* NI */
369void osd_req_create_write(struct osd_request *or,
370 const struct osd_obj_id *, struct bio *data_out, u64 offset);/* NI */
371void osd_req_clear(struct osd_request *or,
372 const struct osd_obj_id *, u64 offset, u64 len);/* NI */
373void osd_req_punch(struct osd_request *or,
374 const struct osd_obj_id *, u64 offset, u64 len);/* V2-only NI */
375
376void osd_req_flush_object(struct osd_request *or,
377 const struct osd_obj_id *, enum osd_options_flush_scope_values,
378 /*V2*/ u64 offset, /*V2*/ u64 len);
379
380void osd_req_read(struct osd_request *or,
381 const struct osd_obj_id *, struct bio *data_in, u64 offset);
382
383/*
384 * Root/Partition/Collection/Object Attributes commands
385 */
386
387/* get before set */
388void osd_req_get_attributes(struct osd_request *or, const struct osd_obj_id *);
389
390/* set before get */
391void osd_req_set_attributes(struct osd_request *or, const struct osd_obj_id *);
392
393/*
394 * Attributes appended to most commands
395 */
396
397/* Attributes List mode (or V2 CDB) */
398 /*
399 * TODO: In ver2 if at finalize time only one attr was set and no gets,
400 * then the Attributes CDB mode is used automatically to save IO.
401 */
402
403/* set a list of attributes. */
404int osd_req_add_set_attr_list(struct osd_request *or,
405 const struct osd_attr *, unsigned nelem);
406
407/* get a list of attributes */
408int osd_req_add_get_attr_list(struct osd_request *or,
409 const struct osd_attr *, unsigned nelem);
410
411/*
412 * Attributes list decoding
413 * Must be called after osd_request.request was executed
414 * It is called in a loop to decode the returned get_attr
415 * (see osd_add_get_attr)
416 */
417int osd_req_decode_get_attr_list(struct osd_request *or,
418 struct osd_attr *, int *nelem, void **iterator);
419
420/* Attributes Page mode */
421
422/*
423 * Read an attribute page and optionally set one attribute
424 *
425 * Retrieves the attribute page directly to a user buffer.
426 * @attr_page_data shall stay valid until end of execution.
427 * See osd_attributes.h for common page structures
428 */
429int osd_req_add_get_attr_page(struct osd_request *or,
430 u32 page_id, void *attr_page_data, unsigned max_page_len,
431 const struct osd_attr *set_one);
432
433#endif /* __OSD_LIB_H__ */
diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h
new file mode 100644
index 000000000000..cd3cbf764650
--- /dev/null
+++ b/include/scsi/osd_protocol.h
@@ -0,0 +1,579 @@
1/*
2 * osd_protocol.h - OSD T10 standard C definitions.
3 *
4 * Copyright (C) 2008 Panasas Inc. All rights reserved.
5 *
6 * Authors:
7 * Boaz Harrosh <bharrosh@panasas.com>
8 * Benny Halevy <bhalevy@panasas.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 *
13 * This file contains types and constants that are defined by the protocol
14 * Note: All names and symbols are taken from the OSD standard's text.
15 */
16#ifndef __OSD_PROTOCOL_H__
17#define __OSD_PROTOCOL_H__
18
19#include <linux/types.h>
20#include <asm/unaligned.h>
21#include <scsi/scsi.h>
22
23enum {
24 OSDv1_ADDITIONAL_CDB_LENGTH = 192,
25 OSDv1_TOTAL_CDB_LEN = OSDv1_ADDITIONAL_CDB_LENGTH + 8,
26 OSDv1_CAP_LEN = 80,
27 /* Latest supported version */
28/* OSD_ADDITIONAL_CDB_LENGTH = 216,*/
29 OSD_ADDITIONAL_CDB_LENGTH =
30 OSDv1_ADDITIONAL_CDB_LENGTH, /* FIXME: Pete rev-001 sup */
31 OSD_TOTAL_CDB_LEN = OSD_ADDITIONAL_CDB_LENGTH + 8,
32/* OSD_CAP_LEN = 104,*/
33 OSD_CAP_LEN = OSDv1_CAP_LEN,/* FIXME: Pete rev-001 sup */
34
35 OSD_SYSTEMID_LEN = 20,
36 OSD_CRYPTO_KEYID_SIZE = 20,
37 /*FIXME: OSDv2_CRYPTO_KEYID_SIZE = 32,*/
38 OSD_CRYPTO_SEED_SIZE = 4,
39 OSD_CRYPTO_NONCE_SIZE = 12,
40 OSD_MAX_SENSE_LEN = 252, /* from SPC-3 */
41
42 OSD_PARTITION_FIRST_ID = 0x10000,
43 OSD_OBJECT_FIRST_ID = 0x10000,
44};
45
46/* (osd-r10 5.2.4)
47 * osd2r03: 5.2.3 Caching control bits
48 */
49enum osd_options_byte {
50 OSD_CDB_FUA = 0x08, /* Force Unit Access */
51 OSD_CDB_DPO = 0x10, /* Disable Page Out */
52};
53
54/*
55 * osd2r03: 5.2.5 Isolation.
56 * First 3 bits, V2-only.
57 * Also for attr 110h "default isolation method" at Root Information page
58 */
59enum osd_options_byte_isolation {
60 OSD_ISOLATION_DEFAULT = 0,
61 OSD_ISOLATION_NONE = 1,
62 OSD_ISOLATION_STRICT = 2,
63 OSD_ISOLATION_RANGE = 4,
64 OSD_ISOLATION_FUNCTIONAL = 5,
65 OSD_ISOLATION_VENDOR = 7,
66};
67
68/* (osd-r10: 6.7)
69 * osd2r03: 6.8 FLUSH, FLUSH COLLECTION, FLUSH OSD, FLUSH PARTITION
70 */
71enum osd_options_flush_scope_values {
72 OSD_CDB_FLUSH_ALL = 0,
73 OSD_CDB_FLUSH_ATTR_ONLY = 1,
74
75 OSD_CDB_FLUSH_ALL_RECURSIVE = 2,
76 /* V2-only */
77 OSD_CDB_FLUSH_ALL_RANGE = 2,
78};
79
80/* osd2r03: 5.2.10 Timestamps control */
81enum {
82 OSD_CDB_NORMAL_TIMESTAMPS = 0,
83 OSD_CDB_BYPASS_TIMESTAMPS = 0x7f,
84};
85
86/* (osd-r10: 5.2.2.1)
87 * osd2r03: 5.2.4.1 Get and set attributes CDB format selection
88 * 2 bits at second nibble of command_specific_options byte
89 */
90enum osd_attributes_mode {
91 /* V2-only */
92 OSD_CDB_SET_ONE_ATTR = 0x10,
93
94 OSD_CDB_GET_ATTR_PAGE_SET_ONE = 0x20,
95 OSD_CDB_GET_SET_ATTR_LISTS = 0x30,
96
97 OSD_CDB_GET_SET_ATTR_MASK = 0x30,
98};
99
100/* (osd-r10: 4.12.5)
101 * osd2r03: 4.14.5 Data-In and Data-Out buffer offsets
102 * byte offset = mantissa * (2^(exponent+8))
103 * struct {
104 * unsigned mantissa: 28;
105 * int exponent: 04;
106 * }
107 */
108typedef __be32 __bitwise osd_cdb_offset;
109
110enum {
111 OSD_OFFSET_UNUSED = 0xFFFFFFFF,
112 OSD_OFFSET_MAX_BITS = 28,
113
114 OSDv1_OFFSET_MIN_SHIFT = 8,
115 OSD_OFFSET_MIN_SHIFT = 3,
116 OSD_OFFSET_MAX_SHIFT = 16,
117};
118
119/* Return the smallest allowed encoded offset that contains @offset.
120 *
121 * The actual encoded offset returned is @offset + *padding.
122 * (up to max_shift, non-inclusive)
123 */
124osd_cdb_offset __osd_encode_offset(u64 offset, unsigned *padding,
125 int min_shift, int max_shift);
126
127/* Minimum alignment is 256 bytes
128 * Note: Seems from std v1 that exponent can be from 0+8 to 0xE+8 (inclusive)
129 * which is 8 to 23 but IBM code restricts it to 16, so be it.
130 */
/* Encode @offset for a V1 CDB (min shift 8 => 256-byte alignment).
 * On return, *@padding holds the bytes added to reach the encoded value.
 */
static inline osd_cdb_offset osd_encode_offset_v1(u64 offset, unsigned *padding)
{
	return __osd_encode_offset(offset, padding,
		OSDv1_OFFSET_MIN_SHIFT, OSD_OFFSET_MAX_SHIFT);
}
136
/* Minimum 8 bytes alignment.
 * Same as v1, but since the exponent can be signed, an alignment of less
 * than 256 can be reached with small offsets (<2GB).
 */
/* Encode @offset for a V2 CDB (min shift 3 => 8-byte alignment).
 * On return, *@padding holds the bytes added to reach the encoded value.
 */
static inline osd_cdb_offset osd_encode_offset_v2(u64 offset, unsigned *padding)
{
	return __osd_encode_offset(offset, padding,
		OSD_OFFSET_MIN_SHIFT, OSD_OFFSET_MAX_SHIFT);
}
146
147/* osd2r03: 5.2.1 Overview */
148struct osd_cdb_head {
149 struct scsi_varlen_cdb_hdr varlen_cdb;
150/*10*/ u8 options;
151 u8 command_specific_options;
152 u8 timestamp_control;
153/*13*/ u8 reserved1[3];
154/*16*/ __be64 partition;
155/*24*/ __be64 object;
156/*32*/ union { /* V1 vs V2 alignment differences */
157 struct __osdv1_cdb_addr_len {
158/*32*/ __be32 list_identifier;/* Rarely used */
159/*36*/ __be64 length;
160/*44*/ __be64 start_address;
161 } __packed v1;
162
163 struct __osdv2_cdb_addr_len {
164 /* called allocation_length in some commands */
165/*32*/ __be64 length;
166/*40*/ __be64 start_address;
167/*48*/ __be32 list_identifier;/* Rarely used */
168 } __packed v2;
169 };
170/*52*/ union { /* selected attributes mode Page/List/Single */
171 struct osd_attributes_page_mode {
172/*52*/ __be32 get_attr_page;
173/*56*/ __be32 get_attr_alloc_length;
174/*60*/ osd_cdb_offset get_attr_offset;
175
176/*64*/ __be32 set_attr_page;
177/*68*/ __be32 set_attr_id;
178/*72*/ __be32 set_attr_length;
179/*76*/ osd_cdb_offset set_attr_offset;
180/*80*/ } __packed attrs_page;
181
182 struct osd_attributes_list_mode {
183/*52*/ __be32 get_attr_desc_bytes;
184/*56*/ osd_cdb_offset get_attr_desc_offset;
185
186/*60*/ __be32 get_attr_alloc_length;
187/*64*/ osd_cdb_offset get_attr_offset;
188
189/*68*/ __be32 set_attr_bytes;
190/*72*/ osd_cdb_offset set_attr_offset;
191 __be32 not_used;
192/*80*/ } __packed attrs_list;
193
194 /* osd2r03:5.2.4.2 Set one attribute value using CDB fields */
195 struct osd_attributes_cdb_mode {
196/*52*/ __be32 set_attr_page;
197/*56*/ __be32 set_attr_id;
198/*60*/ __be16 set_attr_len;
199/*62*/ u8 set_attr_val[18];
200/*80*/ } __packed attrs_cdb;
201/*52*/ u8 get_set_attributes_parameters[28];
202 };
203} __packed;
204/*80*/
205
206/*160 v1*/
207/*184 v2*/
208struct osd_security_parameters {
209/*160*/u8 integrity_check_value[OSD_CRYPTO_KEYID_SIZE];
210/*180*/u8 request_nonce[OSD_CRYPTO_NONCE_SIZE];
211/*192*/osd_cdb_offset data_in_integrity_check_offset;
212/*196*/osd_cdb_offset data_out_integrity_check_offset;
213} __packed;
214/*200 v1*/
215/*224 v2*/
216
217/* FIXME: osdv2_security_parameters */
218
219struct osdv1_cdb {
220 struct osd_cdb_head h;
221 u8 caps[OSDv1_CAP_LEN];
222 struct osd_security_parameters sec_params;
223} __packed;
224
225struct osdv2_cdb {
226 struct osd_cdb_head h;
227 u8 caps[OSD_CAP_LEN];
228 struct osd_security_parameters sec_params;
229 /* FIXME: osdv2_security_parameters */
230} __packed;
231
232struct osd_cdb {
233 union {
234 struct osdv1_cdb v1;
235 struct osdv2_cdb v2;
236 u8 buff[OSD_TOTAL_CDB_LEN];
237 };
238} __packed;
239
240static inline struct osd_cdb_head *osd_cdb_head(struct osd_cdb *ocdb)
241{
242 return (struct osd_cdb_head *)ocdb->buff;
243}
244
245/* define both version actions
246 * Ex name = FORMAT_OSD we have OSD_ACT_FORMAT_OSD && OSDv1_ACT_FORMAT_OSD
247 */
248#define OSD_ACT___(Name, Num) \
249 OSD_ACT_##Name = __constant_cpu_to_be16(0x8880 + Num), \
250 OSDv1_ACT_##Name = __constant_cpu_to_be16(0x8800 + Num),
251
252/* V2 only actions */
253#define OSD_ACT_V2(Name, Num) \
254 OSD_ACT_##Name = __constant_cpu_to_be16(0x8880 + Num),
255
256#define OSD_ACT_V1_V2(Name, Num1, Num2) \
257 OSD_ACT_##Name = __constant_cpu_to_be16(Num2), \
258 OSDv1_ACT_##Name = __constant_cpu_to_be16(Num1),
259
260enum osd_service_actions {
261 OSD_ACT_V2(OBJECT_STRUCTURE_CHECK, 0x00)
262 OSD_ACT___(FORMAT_OSD, 0x01)
263 OSD_ACT___(CREATE, 0x02)
264 OSD_ACT___(LIST, 0x03)
265 OSD_ACT_V2(PUNCH, 0x04)
266 OSD_ACT___(READ, 0x05)
267 OSD_ACT___(WRITE, 0x06)
268 OSD_ACT___(APPEND, 0x07)
269 OSD_ACT___(FLUSH, 0x08)
270 OSD_ACT_V2(CLEAR, 0x09)
271 OSD_ACT___(REMOVE, 0x0A)
272 OSD_ACT___(CREATE_PARTITION, 0x0B)
273 OSD_ACT___(REMOVE_PARTITION, 0x0C)
274 OSD_ACT___(GET_ATTRIBUTES, 0x0E)
275 OSD_ACT___(SET_ATTRIBUTES, 0x0F)
276 OSD_ACT___(CREATE_AND_WRITE, 0x12)
277 OSD_ACT___(CREATE_COLLECTION, 0x15)
278 OSD_ACT___(REMOVE_COLLECTION, 0x16)
279 OSD_ACT___(LIST_COLLECTION, 0x17)
280 OSD_ACT___(SET_KEY, 0x18)
281 OSD_ACT___(SET_MASTER_KEY, 0x19)
282 OSD_ACT___(FLUSH_COLLECTION, 0x1A)
283 OSD_ACT___(FLUSH_PARTITION, 0x1B)
284 OSD_ACT___(FLUSH_OSD, 0x1C)
285
286 OSD_ACT_V2(QUERY, 0x20)
287 OSD_ACT_V2(REMOVE_MEMBER_OBJECTS, 0x21)
288 OSD_ACT_V2(GET_MEMBER_ATTRIBUTES, 0x22)
289 OSD_ACT_V2(SET_MEMBER_ATTRIBUTES, 0x23)
290 OSD_ACT_V2(READ_MAP, 0x31)
291
292 OSD_ACT_V1_V2(PERFORM_SCSI_COMMAND, 0x8F7E, 0x8F7C)
293 OSD_ACT_V1_V2(SCSI_TASK_MANAGEMENT, 0x8F7F, 0x8F7D)
294 /* 0x8F80 to 0x8FFF are Vendor specific */
295};
296
297/* osd2r03: 7.1.3.2 List entry format for retrieving attributes */
298struct osd_attributes_list_attrid {
299 __be32 attr_page;
300 __be32 attr_id;
301} __packed;
302
303/*
304 * osd2r03: 7.1.3.3 List entry format for retrieved attributes and
305 * for setting attributes
306 * NOTE: v2 is 8-bytes aligned, v1 is not aligned.
307 */
308struct osd_attributes_list_element {
309 __be32 attr_page;
310 __be32 attr_id;
311 __be16 attr_bytes;
312 u8 attr_val[0];
313} __packed;
314
315enum {
316 OSDv1_ATTRIBUTES_ELEM_ALIGN = 1,
317 OSD_ATTRIBUTES_ELEM_ALIGN = 8,
318};
319
320enum {
321 OSD_ATTR_LIST_ALL_PAGES = 0xFFFFFFFF,
322 OSD_ATTR_LIST_ALL_IN_PAGE = 0xFFFFFFFF,
323};
324
325static inline unsigned osdv1_attr_list_elem_size(unsigned len)
326{
327 return ALIGN(len + sizeof(struct osd_attributes_list_element),
328 OSDv1_ATTRIBUTES_ELEM_ALIGN);
329}
330
331static inline unsigned osdv2_attr_list_elem_size(unsigned len)
332{
333 return ALIGN(len + sizeof(struct osd_attributes_list_element),
334 OSD_ATTRIBUTES_ELEM_ALIGN);
335}
336
/*
 * osd2r03: 7.1.3 OSD attributes lists (Table 184) — List type values
 */
enum osd_attr_list_types {
	OSD_ATTR_LIST_GET = 0x1, 	/* descriptors only */
	OSD_ATTR_LIST_SET_RETRIEVE = 0x9, /* descriptors/values, variable-length */
	OSD_V2_ATTR_LIST_MULTIPLE = 0xE, 	/* ver2, multiple-objects lists */
	OSD_V1_ATTR_LIST_CREATE_MULTIPLE = 0xF,/* ver1, used by create_multiple */
};
346
/* osd2r03: 7.1.3.4 Multi-object retrieved attributes format */
struct osd_attributes_list_multi_header {
	__be64 object_id;
	u8 object_type; /* object_type enum below */
	u8 reserved[5];
	__be16 list_bytes;
	/* followed by struct osd_attributes_list_element's */
	/* NOTE(review): unlike the other on-wire structs in this file, this
	 * one is not marked __packed. Its 16 bytes happen to be naturally
	 * packed, so the layout is unaffected — confirm this is intentional.
	 */
};
355
356struct osdv1_attributes_list_header {
357 u8 type; /* low 4-bit only */
358 u8 pad;
359 __be16 list_bytes; /* Initiator shall set to Zero. Only set by target */
360 /*
361 * type=9 followed by struct osd_attributes_list_element's
362 * type=E followed by struct osd_attributes_list_multi_header's
363 */
364} __packed;
365
366static inline unsigned osdv1_list_size(struct osdv1_attributes_list_header *h)
367{
368 return be16_to_cpu(h->list_bytes);
369}
370
371struct osdv2_attributes_list_header {
372 u8 type; /* lower 4-bits only */
373 u8 pad[3];
374/*4*/ __be32 list_bytes; /* Initiator shall set to zero. Only set by target */
375 /*
376 * type=9 followed by struct osd_attributes_list_element's
377 * type=E followed by struct osd_attributes_list_multi_header's
378 */
379} __packed;
380
381static inline unsigned osdv2_list_size(struct osdv2_attributes_list_header *h)
382{
383 return be32_to_cpu(h->list_bytes);
384}
385
386/* (osd-r10 6.13)
387 * osd2r03: 6.15 LIST (Table 79) LIST command parameter data.
388 * for root_lstchg below
389 */
390enum {
391 OSD_OBJ_ID_LIST_PAR = 0x1, /* V1-only. Not used in V2 */
392 OSD_OBJ_ID_LIST_LSTCHG = 0x2,
393};
394
395/*
396 * osd2r03: 6.15.2 LIST command parameter data
397 * (Also for LIST COLLECTION)
398 */
399struct osd_obj_id_list {
400 __be64 list_bytes; /* bytes in list excluding list_bytes (-8) */
401 __be64 continuation_id;
402 __be32 list_identifier;
403 u8 pad[3];
404 u8 root_lstchg;
405 __be64 object_ids[0];
406} __packed;
407
408static inline bool osd_is_obj_list_done(struct osd_obj_id_list *list,
409 bool *is_changed)
410{
411 *is_changed = (0 != (list->root_lstchg & OSD_OBJ_ID_LIST_LSTCHG));
412 return 0 != list->continuation_id;
413}
414
415/*
416 * osd2r03: 4.12.4.5 The ALLDATA security method
417 */
418struct osd_data_out_integrity_info {
419 __be64 data_bytes;
420 __be64 set_attributes_bytes;
421 __be64 get_attributes_bytes;
422 __be64 integrity_check_value;
423} __packed;
424
425struct osd_data_in_integrity_info {
426 __be64 data_bytes;
427 __be64 retrieved_attributes_bytes;
428 __be64 integrity_check_value;
429} __packed;
430
431struct osd_timestamp {
432 u8 time[6]; /* number of milliseconds since 1/1/1970 UT (big endian) */
433} __packed;
434/* FIXME: define helper functions to convert to/from osd time format */
435
436/*
437 * Capability & Security definitions
438 * osd2r03: 4.11.2.2 Capability format
439 * osd2r03: 5.2.8 Security parameters
440 */
441
442struct osd_key_identifier {
443 u8 id[7]; /* if you know why 7 please email bharrosh@panasas.com */
444} __packed;
445
446/* for osd_capability.format */
447enum {
448 OSD_SEC_CAP_FORMAT_NO_CAPS = 0,
449 OSD_SEC_CAP_FORMAT_VER1 = 1,
450 OSD_SEC_CAP_FORMAT_VER2 = 2,
451};
452
453/* security_method */
454enum {
455 OSD_SEC_NOSEC = 0,
456 OSD_SEC_CAPKEY = 1,
457 OSD_SEC_CMDRSP = 2,
458 OSD_SEC_ALLDATA = 3,
459};
460
461enum object_type {
462 OSD_SEC_OBJ_ROOT = 0x1,
463 OSD_SEC_OBJ_PARTITION = 0x2,
464 OSD_SEC_OBJ_COLLECTION = 0x40,
465 OSD_SEC_OBJ_USER = 0x80,
466};
467
468enum osd_capability_bit_masks {
469 OSD_SEC_CAP_APPEND = BIT(0),
470 OSD_SEC_CAP_OBJ_MGMT = BIT(1),
471 OSD_SEC_CAP_REMOVE = BIT(2),
472 OSD_SEC_CAP_CREATE = BIT(3),
473 OSD_SEC_CAP_SET_ATTR = BIT(4),
474 OSD_SEC_CAP_GET_ATTR = BIT(5),
475 OSD_SEC_CAP_WRITE = BIT(6),
476 OSD_SEC_CAP_READ = BIT(7),
477
478 OSD_SEC_CAP_NONE1 = BIT(8),
479 OSD_SEC_CAP_NONE2 = BIT(9),
480 OSD_SEC_CAP_NONE3 = BIT(10),
481 OSD_SEC_CAP_QUERY = BIT(11), /*v2 only*/
482 OSD_SEC_CAP_M_OBJECT = BIT(12), /*v2 only*/
483 OSD_SEC_CAP_POL_SEC = BIT(13),
484 OSD_SEC_CAP_GLOBAL = BIT(14),
485 OSD_SEC_CAP_DEV_MGMT = BIT(15),
486};
487
488/* for object_descriptor_type (hi nibble used) */
489enum {
490 OSD_SEC_OBJ_DESC_NONE = 0, /* Not allowed */
491 OSD_SEC_OBJ_DESC_OBJ = 1 << 4, /* v1: also collection */
492 OSD_SEC_OBJ_DESC_PAR = 2 << 4, /* also root */
493 OSD_SEC_OBJ_DESC_COL = 3 << 4, /* v2 only */
494};
495
496/* (osd-r10:4.9.2.2)
497 * osd2r03:4.11.2.2 Capability format
498 */
499struct osd_capability_head {
500 u8 format; /* low nibble */
501 u8 integrity_algorithm__key_version; /* MAKE_BYTE(integ_alg, key_ver) */
502 u8 security_method;
503 u8 reserved1;
504/*04*/ struct osd_timestamp expiration_time;
505/*10*/ u8 audit[20];
506/*30*/ u8 discriminator[12];
507/*42*/ struct osd_timestamp object_created_time;
508/*48*/ u8 object_type;
509/*49*/ u8 permissions_bit_mask[5];
510/*54*/ u8 reserved2;
511/*55*/ u8 object_descriptor_type; /* high nibble */
512} __packed;
513
514/*56 v1*/
515struct osdv1_cap_object_descriptor {
516 union {
517 struct {
518/*56*/ __be32 policy_access_tag;
519/*60*/ __be64 allowed_partition_id;
520/*68*/ __be64 allowed_object_id;
521/*76*/ __be32 reserved;
522 } __packed obj_desc;
523
524/*56*/ u8 object_descriptor[24];
525 };
526} __packed;
527/*80 v1*/
528
529/*56 v2*/
530struct osd_cap_object_descriptor {
531 union {
532 struct {
533/*56*/ __be32 allowed_attributes_access;
534/*60*/ __be32 policy_access_tag;
535/*64*/ __be16 boot_epoch;
536/*66*/ u8 reserved[6];
537/*72*/ __be64 allowed_partition_id;
538/*80*/ __be64 allowed_object_id;
539/*88*/ __be64 allowed_range_length;
540/*96*/ __be64 allowed_range_start;
541 } __packed obj_desc;
542
543/*56*/ u8 object_descriptor[48];
544 };
545} __packed;
546/*104 v2*/
547
548struct osdv1_capability {
549 struct osd_capability_head h;
550 struct osdv1_cap_object_descriptor od;
551} __packed;
552
553struct osd_capability {
554 struct osd_capability_head h;
555/* struct osd_cap_object_descriptor od;*/
556 struct osdv1_cap_object_descriptor od; /* FIXME: Pete rev-001 sup */
557} __packed;
558
/**
 * osd_sec_set_caps - set cap-bits into the capabilities header
 *
 * @cap: The osd_capability_head to set the cap bits into.
 * @bit_mask: An ORed list of enum osd_capability_bit_masks values.
 *
 * permissions_bit_mask is unaligned; use this helper to store caps
 * in a version-independent way.
 */
static inline void osd_sec_set_caps(struct osd_capability_head *cap,
	u16 bit_mask)
{
	/*
	 * Note: the bits above are defined in LE order so that they can
	 * grow beyond 16 in the future and still retain their constant
	 * values.
	 */
	put_unaligned_le16(bit_mask, &cap->permissions_bit_mask);
}
578
579#endif /* ndef __OSD_PROTOCOL_H__ */
diff --git a/include/scsi/osd_sec.h b/include/scsi/osd_sec.h
new file mode 100644
index 000000000000..4c09fee8ae1e
--- /dev/null
+++ b/include/scsi/osd_sec.h
@@ -0,0 +1,45 @@
1/*
2 * osd_sec.h - OSD security manager API
3 *
4 * Copyright (C) 2008 Panasas Inc. All rights reserved.
5 *
6 * Authors:
7 * Boaz Harrosh <bharrosh@panasas.com>
8 * Benny Halevy <bhalevy@panasas.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 *
13 */
14#ifndef __OSD_SEC_H__
15#define __OSD_SEC_H__
16
17#include "osd_protocol.h"
18#include "osd_types.h"
19
20/*
21 * Contains types and constants of osd capabilities and security
22 * encoding/decoding.
23 * API is trying to keep security abstract so initiator of an object
24 * based pNFS client knows as little as possible about security and
25 * capabilities. It is the Server's osd-initiator place to know more.
26 * Also can be used by osd-target.
27 */
28void osd_sec_encode_caps(void *caps, ...);/* NI */
29void osd_sec_init_nosec_doall_caps(void *caps,
30 const struct osd_obj_id *obj, bool is_collection, const bool is_v1);
31
32bool osd_is_sec_alldata(struct osd_security_parameters *sec_params);
33
34/* Conditionally sign the CDB according to security setting in ocdb
35 * with cap_key */
36void osd_sec_sign_cdb(struct osd_cdb *ocdb, const u8 *cap_key);
37
38/* Unconditionally sign the BIO data with cap_key.
39 * Check for osd_is_sec_alldata() was done prior to calling this. */
40void osd_sec_sign_data(void *data_integ, struct bio *bio, const u8 *cap_key);
41
42/* Version independent copy of caps into the cdb */
43void osd_set_caps(struct osd_cdb *cdb, const void *caps);
44
45#endif /* ndef __OSD_SEC_H__ */
diff --git a/include/scsi/osd_sense.h b/include/scsi/osd_sense.h
new file mode 100644
index 000000000000..ff9b33c773c7
--- /dev/null
+++ b/include/scsi/osd_sense.h
@@ -0,0 +1,260 @@
1/*
2 * osd_sense.h - OSD Related sense handling definitions.
3 *
4 * Copyright (C) 2008 Panasas Inc. All rights reserved.
5 *
6 * Authors:
7 * Boaz Harrosh <bharrosh@panasas.com>
8 * Benny Halevy <bhalevy@panasas.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 *
13 * This file contains types and constants that are defined by the protocol
14 * Note: All names and symbols are taken from the OSD standard's text.
15 */
16#ifndef __OSD_SENSE_H__
17#define __OSD_SENSE_H__
18
19#include <scsi/osd_protocol.h>
20
21/* SPC3r23 4.5.6 Sense key and sense code definitions table 27 */
22enum scsi_sense_keys {
23 scsi_sk_no_sense = 0x0,
24 scsi_sk_recovered_error = 0x1,
25 scsi_sk_not_ready = 0x2,
26 scsi_sk_medium_error = 0x3,
27 scsi_sk_hardware_error = 0x4,
28 scsi_sk_illegal_request = 0x5,
29 scsi_sk_unit_attention = 0x6,
30 scsi_sk_data_protect = 0x7,
31 scsi_sk_blank_check = 0x8,
32 scsi_sk_vendor_specific = 0x9,
33 scsi_sk_copy_aborted = 0xa,
34 scsi_sk_aborted_command = 0xb,
35 scsi_sk_volume_overflow = 0xd,
36 scsi_sk_miscompare = 0xe,
37 scsi_sk_reserved = 0xf,
38};
39
40/* SPC3r23 4.5.6 Sense key and sense code definitions table 28 */
41/* Note: only those which can be returned by an OSD target. Most of
42 * these errors are taken care of by the generic scsi layer.
43 */
44enum osd_additional_sense_codes {
45 scsi_no_additional_sense_information = 0x0000,
46 scsi_operation_in_progress = 0x0016,
47 scsi_cleaning_requested = 0x0017,
48 scsi_lunr_cause_not_reportable = 0x0400,
49 scsi_logical_unit_is_in_process_of_becoming_ready = 0x0401,
50 scsi_lunr_initializing_command_required = 0x0402,
51 scsi_lunr_manual_intervention_required = 0x0403,
52 scsi_lunr_operation_in_progress = 0x0407,
53 scsi_lunr_selftest_in_progress = 0x0409,
54 scsi_luna_asymmetric_access_state_transition = 0x040a,
55 scsi_luna_target_port_in_standby_state = 0x040b,
56 scsi_luna_target_port_in_unavailable_state = 0x040c,
57 scsi_lunr_notify_enable_spinup_required = 0x0411,
58 scsi_logical_unit_does_not_respond_to_selection = 0x0500,
59 scsi_logical_unit_communication_failure = 0x0800,
60 scsi_logical_unit_communication_timeout = 0x0801,
61 scsi_logical_unit_communication_parity_error = 0x0802,
62 scsi_error_log_overflow = 0x0a00,
63 scsi_warning = 0x0b00,
64 scsi_warning_specified_temperature_exceeded = 0x0b01,
65 scsi_warning_enclosure_degraded = 0x0b02,
66 scsi_write_error_unexpected_unsolicited_data = 0x0c0c,
67 scsi_write_error_not_enough_unsolicited_data = 0x0c0d,
68 scsi_invalid_information_unit = 0x0e00,
69 scsi_invalid_field_in_command_information_unit = 0x0e03,
70 scsi_read_error_failed_retransmission_request = 0x1113,
71 scsi_parameter_list_length_error = 0x1a00,
72 scsi_invalid_command_operation_code = 0x2000,
73 scsi_invalid_field_in_cdb = 0x2400,
74 osd_security_audit_value_frozen = 0x2404,
75 osd_security_working_key_frozen = 0x2405,
76 osd_nonce_not_unique = 0x2406,
77 osd_nonce_timestamp_out_of_range = 0x2407,
78 scsi_logical_unit_not_supported = 0x2500,
79 scsi_invalid_field_in_parameter_list = 0x2600,
80 scsi_parameter_not_supported = 0x2601,
81 scsi_parameter_value_invalid = 0x2602,
82 scsi_invalid_release_of_persistent_reservation = 0x2604,
83 osd_invalid_dataout_buffer_integrity_check_value = 0x260f,
84 scsi_not_ready_to_ready_change_medium_may_have_changed = 0x2800,
85 scsi_power_on_reset_or_bus_device_reset_occurred = 0x2900,
86 scsi_power_on_occurred = 0x2901,
87 scsi_scsi_bus_reset_occurred = 0x2902,
88 scsi_bus_device_reset_function_occurred = 0x2903,
89 scsi_device_internal_reset = 0x2904,
90 scsi_transceiver_mode_changed_to_single_ended = 0x2905,
91 scsi_transceiver_mode_changed_to_lvd = 0x2906,
92 scsi_i_t_nexus_loss_occurred = 0x2907,
93 scsi_parameters_changed = 0x2a00,
94 scsi_mode_parameters_changed = 0x2a01,
95 scsi_asymmetric_access_state_changed = 0x2a06,
96 scsi_priority_changed = 0x2a08,
97 scsi_command_sequence_error = 0x2c00,
98 scsi_previous_busy_status = 0x2c07,
99 scsi_previous_task_set_full_status = 0x2c08,
100 scsi_previous_reservation_conflict_status = 0x2c09,
101 osd_partition_or_collection_contains_user_objects = 0x2c0a,
102 scsi_commands_cleared_by_another_initiator = 0x2f00,
103 scsi_cleaning_failure = 0x3007,
104 scsi_enclosure_failure = 0x3400,
105 scsi_enclosure_services_failure = 0x3500,
106 scsi_unsupported_enclosure_function = 0x3501,
107 scsi_enclosure_services_unavailable = 0x3502,
108 scsi_enclosure_services_transfer_failure = 0x3503,
109 scsi_enclosure_services_transfer_refused = 0x3504,
110 scsi_enclosure_services_checksum_error = 0x3505,
111 scsi_rounded_parameter = 0x3700,
112 osd_read_past_end_of_user_object = 0x3b17,
113 scsi_logical_unit_has_not_self_configured_yet = 0x3e00,
114 scsi_logical_unit_failure = 0x3e01,
115 scsi_timeout_on_logical_unit = 0x3e02,
116 scsi_logical_unit_failed_selftest = 0x3e03,
117 scsi_logical_unit_unable_to_update_selftest_log = 0x3e04,
118 scsi_target_operating_conditions_have_changed = 0x3f00,
119 scsi_microcode_has_been_changed = 0x3f01,
120 scsi_inquiry_data_has_changed = 0x3f03,
121 scsi_echo_buffer_overwritten = 0x3f0f,
122 scsi_diagnostic_failure_on_component_nn_first = 0x4080,
123 scsi_diagnostic_failure_on_component_nn_last = 0x40ff,
124 scsi_message_error = 0x4300,
125 scsi_internal_target_failure = 0x4400,
126 scsi_select_or_reselect_failure = 0x4500,
127 scsi_scsi_parity_error = 0x4700,
128 scsi_data_phase_crc_error_detected = 0x4701,
129 scsi_scsi_parity_error_detected_during_st_data_phase = 0x4702,
130 scsi_asynchronous_information_protection_error_detected = 0x4704,
131 scsi_protocol_service_crc_error = 0x4705,
132 scsi_phy_test_function_in_progress = 0x4706,
133 scsi_invalid_message_error = 0x4900,
134 scsi_command_phase_error = 0x4a00,
135 scsi_data_phase_error = 0x4b00,
136 scsi_logical_unit_failed_self_configuration = 0x4c00,
137 scsi_overlapped_commands_attempted = 0x4e00,
138 osd_quota_error = 0x5507,
139 scsi_failure_prediction_threshold_exceeded = 0x5d00,
140 scsi_failure_prediction_threshold_exceeded_false = 0x5dff,
141 scsi_voltage_fault = 0x6500,
142};
143
144enum scsi_descriptor_types {
145 scsi_sense_information = 0x0,
146 scsi_sense_command_specific_information = 0x1,
147 scsi_sense_key_specific = 0x2,
148 scsi_sense_field_replaceable_unit = 0x3,
149 scsi_sense_stream_commands = 0x4,
150 scsi_sense_block_commands = 0x5,
151 osd_sense_object_identification = 0x6,
152 osd_sense_response_integrity_check = 0x7,
153 osd_sense_attribute_identification = 0x8,
154 scsi_sense_ata_return = 0x9,
155
156 scsi_sense_Reserved_first = 0x0A,
157 scsi_sense_Reserved_last = 0x7F,
158 scsi_sense_Vendor_specific_first = 0x80,
159 scsi_sense_Vendor_specific_last = 0xFF,
160};
161
162struct scsi_sense_descriptor { /* for picking into desc type */
163 u8 descriptor_type; /* one of enum scsi_descriptor_types */
164 u8 additional_length; /* n - 1 */
165 u8 data[];
166} __packed;
167
168/* OSD deploys only scsi descriptor_based sense buffers */
169struct scsi_sense_descriptor_based {
170/*0*/ u8 response_code; /* 0x72 or 0x73 */
171/*1*/ u8 sense_key; /* one of enum scsi_sense_keys (4 lower bits) */
172/*2*/ __be16 additional_sense_code; /* enum osd_additional_sense_codes */
173/*4*/ u8 Reserved[3];
174/*7*/ u8 additional_sense_length; /* n - 7 */
175/*8*/ struct scsi_sense_descriptor ssd[0]; /* variable length, 1 or more */
176} __packed;
177
178/* some descriptors deployed by OSD */
179
180/* SPC3r23 4.5.2.3 Command-specific information sense data descriptor */
181/* Note: this is the same for descriptor_type=00 but with type=00 the
182 * Reserved[0] == 0x80 (ie. bit-7 set)
183 */
184struct scsi_sense_command_specific_data_descriptor {
185/*0*/ u8 descriptor_type; /* (00h/01h) */
186/*1*/ u8 additional_length; /* (0Ah) */
187/*2*/ u8 Reserved[2];
188/*4*/ __be64 information;
189} __packed;
190/*12*/
191
192struct scsi_sense_key_specific_data_descriptor {
193/*0*/ u8 descriptor_type; /* (02h) */
194/*1*/ u8 additional_length; /* (06h) */
195/*2*/ u8 Reserved[2];
196/* SKSV, C/D, Reserved (2), BPV, BIT POINTER (3) */
197/*4*/ u8 sksv_cd_bpv_bp;
198/*5*/ __be16 value; /* field-pointer/progress-value/retry-count/... */
199/*7*/ u8 Reserved2;
200} __packed;
201/*8*/
202
203/* 4.16.2.1 OSD error identification sense data descriptor - table 52 */
204/* Note: these bits are defined LE order for easy definition, this way the BIT()
205 * number is the same as in the documentation. Below members at
206 * osd_sense_identification_data_descriptor are therefore defined __le32.
207 */
208enum osd_command_functions_bits {
209 OSD_CFB_COMMAND = BIT(4),
210 OSD_CFB_CMD_CAP_VERIFIED = BIT(5),
211 OSD_CFB_VALIDATION = BIT(7),
212 OSD_CFB_IMP_ST_ATT = BIT(12),
213 OSD_CFB_SET_ATT = BIT(20),
214 OSD_CFB_SA_CAP_VERIFIED = BIT(21),
215 OSD_CFB_GET_ATT = BIT(28),
216 OSD_CFB_GA_CAP_VERIFIED = BIT(29),
217};
218
219struct osd_sense_identification_data_descriptor {
220/*0*/ u8 descriptor_type; /* (06h) */
221/*1*/ u8 additional_length; /* (1Eh) */
222/*2*/ u8 Reserved[6];
223/*8*/ __le32 not_initiated_functions; /*osd_command_functions_bits*/
224/*12*/ __le32 completed_functions; /*osd_command_functions_bits*/
225/*16*/ __be64 partition_id;
226/*24*/ __be64 object_id;
227} __packed;
228/*32*/
229
230struct osd_sense_response_integrity_check_descriptor {
231/*0*/ u8 descriptor_type; /* (07h) */
232/*1*/ u8 additional_length; /* (20h) */
233/*2*/ u8 integrity_check_value[32]; /*FIXME: OSDv2_CRYPTO_KEYID_SIZE*/
234} __packed;
235/*34*/
236
237struct osd_sense_attributes_data_descriptor {
238/*0*/ u8 descriptor_type; /* (08h) */
239/*1*/ u8 additional_length; /* (n-2) */
240/*2*/ u8 Reserved[6];
241 struct osd_sense_attr {
242/*8*/ __be32 attr_page;
243/*12*/ __be32 attr_id;
244/*16*/ } sense_attrs[0]; /* 1 or more */
245} __packed;
246/*variable*/
247
248/* Dig into scsi_sk_illegal_request/scsi_invalid_field_in_cdb errors */
249
250/*FIXME: Support also field in CAPS*/
251#define OSD_CDB_OFFSET(F) offsetof(struct osd_cdb_head, F)
252
253enum osdv2_cdb_field_offset {
254 OSDv1_CFO_STARTING_BYTE = OSD_CDB_OFFSET(v1.start_address),
255 OSD_CFO_STARTING_BYTE = OSD_CDB_OFFSET(v2.start_address),
256 OSD_CFO_PARTITION_ID = OSD_CDB_OFFSET(partition),
257 OSD_CFO_OBJECT_ID = OSD_CDB_OFFSET(object),
258};
259
260#endif /* ndef __OSD_SENSE_H__ */
diff --git a/include/scsi/osd_types.h b/include/scsi/osd_types.h
new file mode 100644
index 000000000000..3f5e88cc75c0
--- /dev/null
+++ b/include/scsi/osd_types.h
@@ -0,0 +1,40 @@
1/*
2 * osd_types.h - Types and constants which are not part of the protocol.
3 *
4 * Copyright (C) 2008 Panasas Inc. All rights reserved.
5 *
6 * Authors:
7 * Boaz Harrosh <bharrosh@panasas.com>
8 * Benny Halevy <bhalevy@panasas.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 *
13 * Contains types and constants that are implementation specific and are
14 * used by more than one part of the osd library.
15 * (Eg initiator/target/security_manager/...)
16 */
17#ifndef __OSD_TYPES_H__
18#define __OSD_TYPES_H__
19
20struct osd_systemid {
21 u8 data[OSD_SYSTEMID_LEN];
22};
23
24typedef u64 __bitwise osd_id;
25
26struct osd_obj_id {
27 osd_id partition;
28 osd_id id;
29};
30
31static const struct __weak osd_obj_id osd_root_object = {0, 0};
32
33struct osd_attr {
34 u32 attr_page;
35 u32 attr_id;
36 u16 len; /* byte count of operand */
37 void *val_ptr; /* in network order */
38};
39
40#endif /* ndef __OSD_TYPES_H__ */
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index a109165714d6..084478e14d24 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -9,7 +9,8 @@
9#define _SCSI_SCSI_H 9#define _SCSI_SCSI_H
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <scsi/scsi_cmnd.h> 12
13struct scsi_cmnd;
13 14
14/* 15/*
15 * The maximum number of SG segments that we will put inside a 16 * The maximum number of SG segments that we will put inside a
@@ -263,6 +264,7 @@ static inline int scsi_status_is_good(int status)
263#define TYPE_RAID 0x0c 264#define TYPE_RAID 0x0c
264#define TYPE_ENCLOSURE 0x0d /* Enclosure Services Device */ 265#define TYPE_ENCLOSURE 0x0d /* Enclosure Services Device */
265#define TYPE_RBC 0x0e 266#define TYPE_RBC 0x0e
267#define TYPE_OSD 0x11
266#define TYPE_NO_LUN 0x7f 268#define TYPE_NO_LUN 0x7f
267 269
268/* SCSI protocols; these are taken from SPC-3 section 7.5 */ 270/* SCSI protocols; these are taken from SPC-3 section 7.5 */
@@ -402,16 +404,6 @@ static inline int scsi_is_wlun(unsigned int lun)
402#define DRIVER_HARD 0x07 404#define DRIVER_HARD 0x07
403#define DRIVER_SENSE 0x08 405#define DRIVER_SENSE 0x08
404 406
405#define SUGGEST_RETRY 0x10
406#define SUGGEST_ABORT 0x20
407#define SUGGEST_REMAP 0x30
408#define SUGGEST_DIE 0x40
409#define SUGGEST_SENSE 0x80
410#define SUGGEST_IS_OK 0xff
411
412#define DRIVER_MASK 0x0f
413#define SUGGEST_MASK 0xf0
414
415/* 407/*
416 * Internal return values. 408 * Internal return values.
417 */ 409 */
@@ -447,23 +439,6 @@ static inline int scsi_is_wlun(unsigned int lun)
447#define msg_byte(result) (((result) >> 8) & 0xff) 439#define msg_byte(result) (((result) >> 8) & 0xff)
448#define host_byte(result) (((result) >> 16) & 0xff) 440#define host_byte(result) (((result) >> 16) & 0xff)
449#define driver_byte(result) (((result) >> 24) & 0xff) 441#define driver_byte(result) (((result) >> 24) & 0xff)
450#define suggestion(result) (driver_byte(result) & SUGGEST_MASK)
451
452static inline void set_msg_byte(struct scsi_cmnd *cmd, char status)
453{
454 cmd->result |= status << 8;
455}
456
457static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
458{
459 cmd->result |= status << 16;
460}
461
462static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
463{
464 cmd->result |= status << 24;
465}
466
467 442
468#define sense_class(sense) (((sense) >> 4) & 0x7) 443#define sense_class(sense) (((sense) >> 4) & 0x7)
469#define sense_error(sense) ((sense) & 0xf) 444#define sense_error(sense) ((sense) & 0xf)
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 855bf95963e7..43b50d36925c 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -291,4 +291,19 @@ static inline struct scsi_data_buffer *scsi_prot(struct scsi_cmnd *cmd)
291#define scsi_for_each_prot_sg(cmd, sg, nseg, __i) \ 291#define scsi_for_each_prot_sg(cmd, sg, nseg, __i) \
292 for_each_sg(scsi_prot_sglist(cmd), sg, nseg, __i) 292 for_each_sg(scsi_prot_sglist(cmd), sg, nseg, __i)
293 293
294static inline void set_msg_byte(struct scsi_cmnd *cmd, char status)
295{
296 cmd->result |= status << 8;
297}
298
299static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
300{
301 cmd->result |= status << 16;
302}
303
304static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
305{
306 cmd->result |= status << 24;
307}
308
294#endif /* _SCSI_SCSI_CMND_H */ 309#endif /* _SCSI_SCSI_CMND_H */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 01a4c58f8bad..3f566af3f101 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -340,6 +340,7 @@ extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
340 struct scsi_sense_hdr *); 340 struct scsi_sense_hdr *);
341extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout, 341extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
342 int retries, struct scsi_sense_hdr *sshdr); 342 int retries, struct scsi_sense_hdr *sshdr);
343extern unsigned char *scsi_get_vpd_page(struct scsi_device *, u8 page);
343extern int scsi_device_set_state(struct scsi_device *sdev, 344extern int scsi_device_set_state(struct scsi_device *sdev,
344 enum scsi_device_state state); 345 enum scsi_device_state state);
345extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, 346extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
@@ -370,12 +371,6 @@ extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
370 int data_direction, void *buffer, unsigned bufflen, 371 int data_direction, void *buffer, unsigned bufflen,
371 struct scsi_sense_hdr *, int timeout, int retries, 372 struct scsi_sense_hdr *, int timeout, int retries,
372 int *resid); 373 int *resid);
373extern int scsi_execute_async(struct scsi_device *sdev,
374 const unsigned char *cmd, int cmd_len, int data_direction,
375 void *buffer, unsigned bufflen, int use_sg,
376 int timeout, int retries, void *privdata,
377 void (*done)(void *, char *, int, int),
378 gfp_t gfp);
379 374
380static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev) 375static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev)
381{ 376{
@@ -400,7 +395,8 @@ static inline unsigned int sdev_id(struct scsi_device *sdev)
400 */ 395 */
401static inline int scsi_device_online(struct scsi_device *sdev) 396static inline int scsi_device_online(struct scsi_device *sdev)
402{ 397{
403 return sdev->sdev_state != SDEV_OFFLINE; 398 return (sdev->sdev_state != SDEV_OFFLINE &&
399 sdev->sdev_state != SDEV_DEL);
404} 400}
405static inline int scsi_device_blocked(struct scsi_device *sdev) 401static inline int scsi_device_blocked(struct scsi_device *sdev)
406{ 402{
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index b50aabe2861e..457588e1119b 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -88,7 +88,7 @@ struct iscsi_transport {
88 uint64_t host_param_mask; 88 uint64_t host_param_mask;
89 struct iscsi_cls_session *(*create_session) (struct iscsi_endpoint *ep, 89 struct iscsi_cls_session *(*create_session) (struct iscsi_endpoint *ep,
90 uint16_t cmds_max, uint16_t qdepth, 90 uint16_t cmds_max, uint16_t qdepth,
91 uint32_t sn, uint32_t *hn); 91 uint32_t sn);
92 void (*destroy_session) (struct iscsi_cls_session *session); 92 void (*destroy_session) (struct iscsi_cls_session *session);
93 struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess, 93 struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
94 uint32_t cid); 94 uint32_t cid);
@@ -206,8 +206,6 @@ struct iscsi_cls_session {
206struct iscsi_cls_host { 206struct iscsi_cls_host {
207 atomic_t nr_scans; 207 atomic_t nr_scans;
208 struct mutex mutex; 208 struct mutex mutex;
209 struct workqueue_struct *scan_workq;
210 char scan_workq_name[20];
211}; 209};
212 210
213extern void iscsi_host_for_each_session(struct Scsi_Host *shost, 211extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 9edb5c4b79b4..c500ca7239b2 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1071,7 +1071,8 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
1071 mutex_unlock(&cgroup_mutex); 1071 mutex_unlock(&cgroup_mutex);
1072 } 1072 }
1073 1073
1074 return simple_set_mnt(mnt, sb); 1074 simple_set_mnt(mnt, sb);
1075 return 0;
1075 1076
1076 free_cg_links: 1077 free_cg_links:
1077 free_cg_links(&tmp_cg_links); 1078 free_cg_links(&tmp_cg_links);
@@ -1627,7 +1628,7 @@ static struct inode_operations cgroup_dir_inode_operations = {
1627static int cgroup_create_file(struct dentry *dentry, int mode, 1628static int cgroup_create_file(struct dentry *dentry, int mode,
1628 struct super_block *sb) 1629 struct super_block *sb)
1629{ 1630{
1630 static struct dentry_operations cgroup_dops = { 1631 static const struct dentry_operations cgroup_dops = {
1631 .d_iput = cgroup_diput, 1632 .d_iput = cgroup_diput,
1632 }; 1633 };
1633 1634
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 80009a24e21d..c4706eb98d3d 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -133,6 +133,32 @@ errout:
133} 133}
134 134
135/** 135/**
136 * nla_policy_len - Determin the max. length of a policy
137 * @policy: policy to use
138 * @n: number of policies
139 *
140 * Determines the max. length of the policy. It is currently used
141 * to allocated Netlink buffers roughly the size of the actual
142 * message.
143 *
144 * Returns 0 on success or a negative error code.
145 */
146int
147nla_policy_len(const struct nla_policy *p, int n)
148{
149 int i, len = 0;
150
151 for (i = 0; i < n; i++) {
152 if (p->len)
153 len += nla_total_size(p->len);
154 else if (nla_attr_minlen[p->type])
155 len += nla_total_size(nla_attr_minlen[p->type]);
156 }
157
158 return len;
159}
160
161/**
136 * nla_parse - Parse a stream of attributes into a tb buffer 162 * nla_parse - Parse a stream of attributes into a tb buffer
137 * @tb: destination array with maxtype+1 elements 163 * @tb: destination array with maxtype+1 elements
138 * @maxtype: maximum attribute type to be expected 164 * @maxtype: maximum attribute type to be expected
@@ -467,6 +493,7 @@ EXPORT_SYMBOL(nla_append);
467#endif 493#endif
468 494
469EXPORT_SYMBOL(nla_validate); 495EXPORT_SYMBOL(nla_validate);
496EXPORT_SYMBOL(nla_policy_len);
470EXPORT_SYMBOL(nla_parse); 497EXPORT_SYMBOL(nla_parse);
471EXPORT_SYMBOL(nla_find); 498EXPORT_SYMBOL(nla_find);
472EXPORT_SYMBOL(nla_strlcpy); 499EXPORT_SYMBOL(nla_strlcpy);
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 3e0671df3a3f..d6a9243641af 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1571,14 +1571,10 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1571 usat->sat_family != AF_APPLETALK) 1571 usat->sat_family != AF_APPLETALK)
1572 return -EINVAL; 1572 return -EINVAL;
1573 1573
1574 /* netatalk doesn't implement this check */ 1574 /* netatalk didn't implement this check */
1575 if (usat->sat_addr.s_node == ATADDR_BCAST && 1575 if (usat->sat_addr.s_node == ATADDR_BCAST &&
1576 !sock_flag(sk, SOCK_BROADCAST)) { 1576 !sock_flag(sk, SOCK_BROADCAST)) {
1577 printk(KERN_INFO "SO_BROADCAST: Fix your netatalk as "
1578 "it will break before 2.2\n");
1579#if 0
1580 return -EPERM; 1577 return -EPERM;
1581#endif
1582 } 1578 }
1583 } else { 1579 } else {
1584 if (sk->sk_state != TCP_ESTABLISHED) 1580 if (sk->sk_state != TCP_ESTABLISHED)
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 7da5ebb84e97..fd9d06f291dc 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1435,11 +1435,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
1435 size_t size; 1435 size_t size;
1436 int lv, err, addr_len = msg->msg_namelen; 1436 int lv, err, addr_len = msg->msg_namelen;
1437 1437
1438 /* AX.25 empty data frame has no meaning : don't send */
1439 if (len == 0) {
1440 return (0);
1441 }
1442
1443 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) 1438 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
1444 return -EINVAL; 1439 return -EINVAL;
1445 1440
@@ -1639,13 +1634,6 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
1639 skb_reset_transport_header(skb); 1634 skb_reset_transport_header(skb);
1640 copied = skb->len; 1635 copied = skb->len;
1641 1636
1642 /* AX.25 empty data frame has no meaning : ignore it */
1643 if (copied == 0) {
1644 err = copied;
1645 skb_free_datagram(sk, skb);
1646 goto out;
1647 }
1648
1649 if (copied > size) { 1637 if (copied > size) {
1650 copied = size; 1638 copied = size;
1651 msg->msg_flags |= MSG_TRUNC; 1639 msg->msg_flags |= MSG_TRUNC;
diff --git a/net/core/dev.c b/net/core/dev.c
index 052dd478d3e1..52fea5b28ca6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1457,7 +1457,9 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1457 ((features & NETIF_F_IP_CSUM) && 1457 ((features & NETIF_F_IP_CSUM) &&
1458 protocol == htons(ETH_P_IP)) || 1458 protocol == htons(ETH_P_IP)) ||
1459 ((features & NETIF_F_IPV6_CSUM) && 1459 ((features & NETIF_F_IPV6_CSUM) &&
1460 protocol == htons(ETH_P_IPV6))); 1460 protocol == htons(ETH_P_IPV6)) ||
1461 ((features & NETIF_F_FCOE_CRC) &&
1462 protocol == htons(ETH_P_FCOE)));
1461} 1463}
1462 1464
1463static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb) 1465static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
@@ -2627,18 +2629,15 @@ static int process_backlog(struct napi_struct *napi, int quota)
2627 local_irq_disable(); 2629 local_irq_disable();
2628 skb = __skb_dequeue(&queue->input_pkt_queue); 2630 skb = __skb_dequeue(&queue->input_pkt_queue);
2629 if (!skb) { 2631 if (!skb) {
2632 __napi_complete(napi);
2630 local_irq_enable(); 2633 local_irq_enable();
2631 napi_complete(napi); 2634 break;
2632 goto out;
2633 } 2635 }
2634 local_irq_enable(); 2636 local_irq_enable();
2635 2637
2636 napi_gro_receive(napi, skb); 2638 netif_receive_skb(skb);
2637 } while (++work < quota && jiffies == start_time); 2639 } while (++work < quota && jiffies == start_time);
2638 2640
2639 napi_gro_flush(napi);
2640
2641out:
2642 return work; 2641 return work;
2643} 2642}
2644 2643
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 84b9c179df51..35c5f6a5cb7c 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -81,19 +81,7 @@ static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
81static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask) 81static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
82{ 82{
83#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 83#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
84 const unsigned long *a = (const unsigned long *)_a; 84 unsigned long ret = ifname_compare_aligned(_a, _b, _mask);
85 const unsigned long *b = (const unsigned long *)_b;
86 const unsigned long *mask = (const unsigned long *)_mask;
87 unsigned long ret;
88
89 ret = (a[0] ^ b[0]) & mask[0];
90 if (IFNAMSIZ > sizeof(unsigned long))
91 ret |= (a[1] ^ b[1]) & mask[1];
92 if (IFNAMSIZ > 2 * sizeof(unsigned long))
93 ret |= (a[2] ^ b[2]) & mask[2];
94 if (IFNAMSIZ > 3 * sizeof(unsigned long))
95 ret |= (a[3] ^ b[3]) & mask[3];
96 BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
97#else 85#else
98 unsigned long ret = 0; 86 unsigned long ret = 0;
99 const u16 *a = (const u16 *)_a; 87 const u16 *a = (const u16 *)_a;
@@ -404,7 +392,9 @@ static int mark_source_chains(struct xt_table_info *newinfo,
404 && unconditional(&e->arp)) || visited) { 392 && unconditional(&e->arp)) || visited) {
405 unsigned int oldpos, size; 393 unsigned int oldpos, size;
406 394
407 if (t->verdict < -NF_MAX_VERDICT - 1) { 395 if ((strcmp(t->target.u.user.name,
396 ARPT_STANDARD_TARGET) == 0) &&
397 t->verdict < -NF_MAX_VERDICT - 1) {
408 duprintf("mark_source_chains: bad " 398 duprintf("mark_source_chains: bad "
409 "negative verdict (%i)\n", 399 "negative verdict (%i)\n",
410 t->verdict); 400 t->verdict);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index e5294aec967d..82ee7c9049ff 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -74,25 +74,6 @@ do { \
74 74
75 Hence the start of any table is given by get_table() below. */ 75 Hence the start of any table is given by get_table() below. */
76 76
77static unsigned long ifname_compare(const char *_a, const char *_b,
78 const unsigned char *_mask)
79{
80 const unsigned long *a = (const unsigned long *)_a;
81 const unsigned long *b = (const unsigned long *)_b;
82 const unsigned long *mask = (const unsigned long *)_mask;
83 unsigned long ret;
84
85 ret = (a[0] ^ b[0]) & mask[0];
86 if (IFNAMSIZ > sizeof(unsigned long))
87 ret |= (a[1] ^ b[1]) & mask[1];
88 if (IFNAMSIZ > 2 * sizeof(unsigned long))
89 ret |= (a[2] ^ b[2]) & mask[2];
90 if (IFNAMSIZ > 3 * sizeof(unsigned long))
91 ret |= (a[3] ^ b[3]) & mask[3];
92 BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
93 return ret;
94}
95
96/* Returns whether matches rule or not. */ 77/* Returns whether matches rule or not. */
97/* Performance critical - called for every packet */ 78/* Performance critical - called for every packet */
98static inline bool 79static inline bool
@@ -121,7 +102,7 @@ ip_packet_match(const struct iphdr *ip,
121 return false; 102 return false;
122 } 103 }
123 104
124 ret = ifname_compare(indev, ipinfo->iniface, ipinfo->iniface_mask); 105 ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
125 106
126 if (FWINV(ret != 0, IPT_INV_VIA_IN)) { 107 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
127 dprintf("VIA in mismatch (%s vs %s).%s\n", 108 dprintf("VIA in mismatch (%s vs %s).%s\n",
@@ -130,7 +111,7 @@ ip_packet_match(const struct iphdr *ip,
130 return false; 111 return false;
131 } 112 }
132 113
133 ret = ifname_compare(outdev, ipinfo->outiface, ipinfo->outiface_mask); 114 ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);
134 115
135 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) { 116 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
136 dprintf("VIA out mismatch (%s vs %s).%s\n", 117 dprintf("VIA out mismatch (%s vs %s).%s\n",
@@ -507,7 +488,9 @@ mark_source_chains(struct xt_table_info *newinfo,
507 && unconditional(&e->ip)) || visited) { 488 && unconditional(&e->ip)) || visited) {
508 unsigned int oldpos, size; 489 unsigned int oldpos, size;
509 490
510 if (t->verdict < -NF_MAX_VERDICT - 1) { 491 if ((strcmp(t->target.u.user.name,
492 IPT_STANDARD_TARGET) == 0) &&
493 t->verdict < -NF_MAX_VERDICT - 1) {
511 duprintf("mark_source_chains: bad " 494 duprintf("mark_source_chains: bad "
512 "negative verdict (%i)\n", 495 "negative verdict (%i)\n",
513 t->verdict); 496 t->verdict);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 8b681f24e271..7d2ead7228ac 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -328,6 +328,11 @@ static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
328 328
329 return 0; 329 return 0;
330} 330}
331
332static int ipv4_nlattr_tuple_size(void)
333{
334 return nla_policy_len(ipv4_nla_policy, CTA_IP_MAX + 1);
335}
331#endif 336#endif
332 337
333static struct nf_sockopt_ops so_getorigdst = { 338static struct nf_sockopt_ops so_getorigdst = {
@@ -347,6 +352,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
347 .get_l4proto = ipv4_get_l4proto, 352 .get_l4proto = ipv4_get_l4proto,
348#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 353#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
349 .tuple_to_nlattr = ipv4_tuple_to_nlattr, 354 .tuple_to_nlattr = ipv4_tuple_to_nlattr,
355 .nlattr_tuple_size = ipv4_nlattr_tuple_size,
350 .nlattr_to_tuple = ipv4_nlattr_to_tuple, 356 .nlattr_to_tuple = ipv4_nlattr_to_tuple,
351 .nla_policy = ipv4_nla_policy, 357 .nla_policy = ipv4_nla_policy,
352#endif 358#endif
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 6ba5c557690c..8668a3defda6 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -25,40 +25,42 @@ struct ct_iter_state {
25 unsigned int bucket; 25 unsigned int bucket;
26}; 26};
27 27
28static struct hlist_node *ct_get_first(struct seq_file *seq) 28static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
29{ 29{
30 struct net *net = seq_file_net(seq); 30 struct net *net = seq_file_net(seq);
31 struct ct_iter_state *st = seq->private; 31 struct ct_iter_state *st = seq->private;
32 struct hlist_node *n; 32 struct hlist_nulls_node *n;
33 33
34 for (st->bucket = 0; 34 for (st->bucket = 0;
35 st->bucket < nf_conntrack_htable_size; 35 st->bucket < nf_conntrack_htable_size;
36 st->bucket++) { 36 st->bucket++) {
37 n = rcu_dereference(net->ct.hash[st->bucket].first); 37 n = rcu_dereference(net->ct.hash[st->bucket].first);
38 if (n) 38 if (!is_a_nulls(n))
39 return n; 39 return n;
40 } 40 }
41 return NULL; 41 return NULL;
42} 42}
43 43
44static struct hlist_node *ct_get_next(struct seq_file *seq, 44static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
45 struct hlist_node *head) 45 struct hlist_nulls_node *head)
46{ 46{
47 struct net *net = seq_file_net(seq); 47 struct net *net = seq_file_net(seq);
48 struct ct_iter_state *st = seq->private; 48 struct ct_iter_state *st = seq->private;
49 49
50 head = rcu_dereference(head->next); 50 head = rcu_dereference(head->next);
51 while (head == NULL) { 51 while (is_a_nulls(head)) {
52 if (++st->bucket >= nf_conntrack_htable_size) 52 if (likely(get_nulls_value(head) == st->bucket)) {
53 return NULL; 53 if (++st->bucket >= nf_conntrack_htable_size)
54 return NULL;
55 }
54 head = rcu_dereference(net->ct.hash[st->bucket].first); 56 head = rcu_dereference(net->ct.hash[st->bucket].first);
55 } 57 }
56 return head; 58 return head;
57} 59}
58 60
59static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos) 61static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
60{ 62{
61 struct hlist_node *head = ct_get_first(seq); 63 struct hlist_nulls_node *head = ct_get_first(seq);
62 64
63 if (head) 65 if (head)
64 while (pos && (head = ct_get_next(seq, head))) 66 while (pos && (head = ct_get_next(seq, head)))
@@ -87,69 +89,76 @@ static void ct_seq_stop(struct seq_file *s, void *v)
87 89
88static int ct_seq_show(struct seq_file *s, void *v) 90static int ct_seq_show(struct seq_file *s, void *v)
89{ 91{
90 const struct nf_conntrack_tuple_hash *hash = v; 92 struct nf_conntrack_tuple_hash *hash = v;
91 const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash); 93 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
92 const struct nf_conntrack_l3proto *l3proto; 94 const struct nf_conntrack_l3proto *l3proto;
93 const struct nf_conntrack_l4proto *l4proto; 95 const struct nf_conntrack_l4proto *l4proto;
96 int ret = 0;
94 97
95 NF_CT_ASSERT(ct); 98 NF_CT_ASSERT(ct);
99 if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
100 return 0;
101
96 102
97 /* we only want to print DIR_ORIGINAL */ 103 /* we only want to print DIR_ORIGINAL */
98 if (NF_CT_DIRECTION(hash)) 104 if (NF_CT_DIRECTION(hash))
99 return 0; 105 goto release;
100 if (nf_ct_l3num(ct) != AF_INET) 106 if (nf_ct_l3num(ct) != AF_INET)
101 return 0; 107 goto release;
102 108
103 l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct)); 109 l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
104 NF_CT_ASSERT(l3proto); 110 NF_CT_ASSERT(l3proto);
105 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); 111 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
106 NF_CT_ASSERT(l4proto); 112 NF_CT_ASSERT(l4proto);
107 113
114 ret = -ENOSPC;
108 if (seq_printf(s, "%-8s %u %ld ", 115 if (seq_printf(s, "%-8s %u %ld ",
109 l4proto->name, nf_ct_protonum(ct), 116 l4proto->name, nf_ct_protonum(ct),
110 timer_pending(&ct->timeout) 117 timer_pending(&ct->timeout)
111 ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0) 118 ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
112 return -ENOSPC; 119 goto release;
113 120
114 if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct)) 121 if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
115 return -ENOSPC; 122 goto release;
116 123
117 if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 124 if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
118 l3proto, l4proto)) 125 l3proto, l4proto))
119 return -ENOSPC; 126 goto release;
120 127
121 if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL)) 128 if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
122 return -ENOSPC; 129 goto release;
123 130
124 if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) 131 if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
125 if (seq_printf(s, "[UNREPLIED] ")) 132 if (seq_printf(s, "[UNREPLIED] "))
126 return -ENOSPC; 133 goto release;
127 134
128 if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, 135 if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
129 l3proto, l4proto)) 136 l3proto, l4proto))
130 return -ENOSPC; 137 goto release;
131 138
132 if (seq_print_acct(s, ct, IP_CT_DIR_REPLY)) 139 if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
133 return -ENOSPC; 140 goto release;
134 141
135 if (test_bit(IPS_ASSURED_BIT, &ct->status)) 142 if (test_bit(IPS_ASSURED_BIT, &ct->status))
136 if (seq_printf(s, "[ASSURED] ")) 143 if (seq_printf(s, "[ASSURED] "))
137 return -ENOSPC; 144 goto release;
138 145
139#ifdef CONFIG_NF_CONNTRACK_MARK 146#ifdef CONFIG_NF_CONNTRACK_MARK
140 if (seq_printf(s, "mark=%u ", ct->mark)) 147 if (seq_printf(s, "mark=%u ", ct->mark))
141 return -ENOSPC; 148 goto release;
142#endif 149#endif
143 150
144#ifdef CONFIG_NF_CONNTRACK_SECMARK 151#ifdef CONFIG_NF_CONNTRACK_SECMARK
145 if (seq_printf(s, "secmark=%u ", ct->secmark)) 152 if (seq_printf(s, "secmark=%u ", ct->secmark))
146 return -ENOSPC; 153 goto release;
147#endif 154#endif
148 155
149 if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) 156 if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
150 return -ENOSPC; 157 goto release;
151 158 ret = 0;
152 return 0; 159release:
160 nf_ct_put(ct);
161 return ret;
153} 162}
154 163
155static const struct seq_operations ct_seq_ops = { 164static const struct seq_operations ct_seq_ops = {
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 2a8bee26f43d..23b2c2ee869a 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -262,6 +262,11 @@ static int icmp_nlattr_to_tuple(struct nlattr *tb[],
262 262
263 return 0; 263 return 0;
264} 264}
265
266static int icmp_nlattr_tuple_size(void)
267{
268 return nla_policy_len(icmp_nla_policy, CTA_PROTO_MAX + 1);
269}
265#endif 270#endif
266 271
267#ifdef CONFIG_SYSCTL 272#ifdef CONFIG_SYSCTL
@@ -309,6 +314,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
309 .me = NULL, 314 .me = NULL,
310#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 315#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
311 .tuple_to_nlattr = icmp_tuple_to_nlattr, 316 .tuple_to_nlattr = icmp_tuple_to_nlattr,
317 .nlattr_tuple_size = icmp_nlattr_tuple_size,
312 .nlattr_to_tuple = icmp_nlattr_to_tuple, 318 .nlattr_to_tuple = icmp_nlattr_to_tuple,
313 .nla_policy = icmp_nla_policy, 319 .nla_policy = icmp_nla_policy,
314#endif 320#endif
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index a65cf692359f..fe65187810f0 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -679,7 +679,7 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
679static int __net_init nf_nat_net_init(struct net *net) 679static int __net_init nf_nat_net_init(struct net *net)
680{ 680{
681 net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 681 net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
682 &net->ipv4.nat_vmalloced); 682 &net->ipv4.nat_vmalloced, 0);
683 if (!net->ipv4.nat_bysource) 683 if (!net->ipv4.nat_bysource)
684 return -ENOMEM; 684 return -ENOMEM;
685 return 0; 685 return 0;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index f171e8dbac91..8f04bd9da274 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -75,8 +75,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
75 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || 75 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
76 !idev || unlikely(idev->cnf.disable_ipv6)) { 76 !idev || unlikely(idev->cnf.disable_ipv6)) {
77 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS); 77 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS);
78 rcu_read_unlock(); 78 goto drop;
79 goto out;
80 } 79 }
81 80
82 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); 81 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
@@ -147,7 +146,6 @@ err:
147drop: 146drop:
148 rcu_read_unlock(); 147 rcu_read_unlock();
149 kfree_skb(skb); 148 kfree_skb(skb);
150out:
151 return 0; 149 return 0;
152} 150}
153 151
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 34af7bb8df5f..e89cfa3a8f25 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -89,25 +89,6 @@ ip6t_ext_hdr(u8 nexthdr)
89 (nexthdr == IPPROTO_DSTOPTS) ); 89 (nexthdr == IPPROTO_DSTOPTS) );
90} 90}
91 91
92static unsigned long ifname_compare(const char *_a, const char *_b,
93 const unsigned char *_mask)
94{
95 const unsigned long *a = (const unsigned long *)_a;
96 const unsigned long *b = (const unsigned long *)_b;
97 const unsigned long *mask = (const unsigned long *)_mask;
98 unsigned long ret;
99
100 ret = (a[0] ^ b[0]) & mask[0];
101 if (IFNAMSIZ > sizeof(unsigned long))
102 ret |= (a[1] ^ b[1]) & mask[1];
103 if (IFNAMSIZ > 2 * sizeof(unsigned long))
104 ret |= (a[2] ^ b[2]) & mask[2];
105 if (IFNAMSIZ > 3 * sizeof(unsigned long))
106 ret |= (a[3] ^ b[3]) & mask[3];
107 BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
108 return ret;
109}
110
111/* Returns whether matches rule or not. */ 92/* Returns whether matches rule or not. */
112/* Performance critical - called for every packet */ 93/* Performance critical - called for every packet */
113static inline bool 94static inline bool
@@ -138,7 +119,7 @@ ip6_packet_match(const struct sk_buff *skb,
138 return false; 119 return false;
139 } 120 }
140 121
141 ret = ifname_compare(indev, ip6info->iniface, ip6info->iniface_mask); 122 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
142 123
143 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) { 124 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
144 dprintf("VIA in mismatch (%s vs %s).%s\n", 125 dprintf("VIA in mismatch (%s vs %s).%s\n",
@@ -147,7 +128,7 @@ ip6_packet_match(const struct sk_buff *skb,
147 return false; 128 return false;
148 } 129 }
149 130
150 ret = ifname_compare(outdev, ip6info->outiface, ip6info->outiface_mask); 131 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
151 132
152 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) { 133 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
153 dprintf("VIA out mismatch (%s vs %s).%s\n", 134 dprintf("VIA out mismatch (%s vs %s).%s\n",
@@ -536,7 +517,9 @@ mark_source_chains(struct xt_table_info *newinfo,
536 && unconditional(&e->ipv6)) || visited) { 517 && unconditional(&e->ipv6)) || visited) {
537 unsigned int oldpos, size; 518 unsigned int oldpos, size;
538 519
539 if (t->verdict < -NF_MAX_VERDICT - 1) { 520 if ((strcmp(t->target.u.user.name,
521 IP6T_STANDARD_TARGET) == 0) &&
522 t->verdict < -NF_MAX_VERDICT - 1) {
540 duprintf("mark_source_chains: bad " 523 duprintf("mark_source_chains: bad "
541 "negative verdict (%i)\n", 524 "negative verdict (%i)\n",
542 t->verdict); 525 t->verdict);
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index e6852f617217..2a15c2d66c69 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -342,6 +342,11 @@ static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
342 342
343 return 0; 343 return 0;
344} 344}
345
346static int ipv6_nlattr_tuple_size(void)
347{
348 return nla_policy_len(ipv6_nla_policy, CTA_IP_MAX + 1);
349}
345#endif 350#endif
346 351
347struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 __read_mostly = { 352struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 __read_mostly = {
@@ -353,6 +358,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 __read_mostly = {
353 .get_l4proto = ipv6_get_l4proto, 358 .get_l4proto = ipv6_get_l4proto,
354#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 359#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
355 .tuple_to_nlattr = ipv6_tuple_to_nlattr, 360 .tuple_to_nlattr = ipv6_tuple_to_nlattr,
361 .nlattr_tuple_size = ipv6_nlattr_tuple_size,
356 .nlattr_to_tuple = ipv6_nlattr_to_tuple, 362 .nlattr_to_tuple = ipv6_nlattr_to_tuple,
357 .nla_policy = ipv6_nla_policy, 363 .nla_policy = ipv6_nla_policy,
358#endif 364#endif
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 41b8a956e1be..9903227bf37c 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -269,6 +269,11 @@ static int icmpv6_nlattr_to_tuple(struct nlattr *tb[],
269 269
270 return 0; 270 return 0;
271} 271}
272
273static int icmpv6_nlattr_tuple_size(void)
274{
275 return nla_policy_len(icmpv6_nla_policy, CTA_PROTO_MAX + 1);
276}
272#endif 277#endif
273 278
274#ifdef CONFIG_SYSCTL 279#ifdef CONFIG_SYSCTL
@@ -300,6 +305,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
300 .error = icmpv6_error, 305 .error = icmpv6_error,
301#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 306#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
302 .tuple_to_nlattr = icmpv6_tuple_to_nlattr, 307 .tuple_to_nlattr = icmpv6_tuple_to_nlattr,
308 .nlattr_tuple_size = icmpv6_nlattr_tuple_size,
303 .nlattr_to_tuple = icmpv6_nlattr_to_tuple, 309 .nlattr_to_tuple = icmpv6_nlattr_to_tuple,
304 .nla_policy = icmpv6_nla_policy, 310 .nla_policy = icmpv6_nla_policy,
305#endif 311#endif
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index a95affc94629..07656d830bc4 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -197,6 +197,14 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
197 197
198 status = WLAN_STATUS_REQUEST_DECLINED; 198 status = WLAN_STATUS_REQUEST_DECLINED;
199 199
200 if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
201#ifdef CONFIG_MAC80211_HT_DEBUG
202 printk(KERN_DEBUG "Suspend in progress. "
203 "Denying ADDBA request\n");
204#endif
205 goto end_no_lock;
206 }
207
200 /* sanity check for incoming parameters: 208 /* sanity check for incoming parameters:
201 * check if configuration can support the BA policy 209 * check if configuration can support the BA policy
202 * and if buffer size does not exceeds max value */ 210 * and if buffer size does not exceeds max value */
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 1df116d4d6e7..947aaaad35d2 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -131,24 +131,6 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
131 131
132 state = &sta->ampdu_mlme.tid_state_tx[tid]; 132 state = &sta->ampdu_mlme.tid_state_tx[tid];
133 133
134 if (local->hw.ampdu_queues) {
135 if (initiator) {
136 /*
137 * Stop the AC queue to avoid issues where we send
138 * unaggregated frames already before the delba.
139 */
140 ieee80211_stop_queue_by_reason(&local->hw,
141 local->hw.queues + sta->tid_to_tx_q[tid],
142 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
143 }
144
145 /*
146 * Pretend the driver woke the queue, just in case
147 * it disabled it before the session was stopped.
148 */
149 ieee80211_wake_queue(
150 &local->hw, local->hw.queues + sta->tid_to_tx_q[tid]);
151 }
152 *state = HT_AGG_STATE_REQ_STOP_BA_MSK | 134 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
153 (initiator << HT_AGG_STATE_INITIATOR_SHIFT); 135 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
154 136
@@ -158,6 +140,10 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
158 /* HW shall not deny going back to legacy */ 140 /* HW shall not deny going back to legacy */
159 if (WARN_ON(ret)) { 141 if (WARN_ON(ret)) {
160 *state = HT_AGG_STATE_OPERATIONAL; 142 *state = HT_AGG_STATE_OPERATIONAL;
143 /*
144 * We may have pending packets get stuck in this case...
145 * Not bothering with a workaround for now.
146 */
161 } 147 }
162 148
163 return ret; 149 return ret;
@@ -212,7 +198,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
212 struct sta_info *sta; 198 struct sta_info *sta;
213 struct ieee80211_sub_if_data *sdata; 199 struct ieee80211_sub_if_data *sdata;
214 u8 *state; 200 u8 *state;
215 int i, qn = -1, ret = 0; 201 int ret = 0;
216 u16 start_seq_num; 202 u16 start_seq_num;
217 203
218 if (WARN_ON(!local->ops->ampdu_action)) 204 if (WARN_ON(!local->ops->ampdu_action))
@@ -226,13 +212,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
226 ra, tid); 212 ra, tid);
227#endif /* CONFIG_MAC80211_HT_DEBUG */ 213#endif /* CONFIG_MAC80211_HT_DEBUG */
228 214
229 if (hw->ampdu_queues && ieee80211_ac_from_tid(tid) == 0) {
230#ifdef CONFIG_MAC80211_HT_DEBUG
231 printk(KERN_DEBUG "rejecting on voice AC\n");
232#endif
233 return -EINVAL;
234 }
235
236 rcu_read_lock(); 215 rcu_read_lock();
237 216
238 sta = sta_info_get(local, ra); 217 sta = sta_info_get(local, ra);
@@ -257,7 +236,17 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
257 goto unlock; 236 goto unlock;
258 } 237 }
259 238
239 if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
240#ifdef CONFIG_MAC80211_HT_DEBUG
241 printk(KERN_DEBUG "Suspend in progress. "
242 "Denying BA session request\n");
243#endif
244 ret = -EINVAL;
245 goto unlock;
246 }
247
260 spin_lock_bh(&sta->lock); 248 spin_lock_bh(&sta->lock);
249 spin_lock(&local->ampdu_lock);
261 250
262 sdata = sta->sdata; 251 sdata = sta->sdata;
263 252
@@ -278,41 +267,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
278 goto err_unlock_sta; 267 goto err_unlock_sta;
279 } 268 }
280 269
281 if (hw->ampdu_queues) { 270 /*
282 spin_lock(&local->queue_stop_reason_lock); 271 * While we're asking the driver about the aggregation,
283 /* reserve a new queue for this session */ 272 * stop the AC queue so that we don't have to worry
284 for (i = 0; i < local->hw.ampdu_queues; i++) { 273 * about frames that came in while we were doing that,
285 if (local->ampdu_ac_queue[i] < 0) { 274 * which would require us to put them to the AC pending
286 qn = i; 275 * afterwards which just makes the code more complex.
287 local->ampdu_ac_queue[qn] = 276 */
288 ieee80211_ac_from_tid(tid); 277 ieee80211_stop_queue_by_reason(
289 break; 278 &local->hw, ieee80211_ac_from_tid(tid),
290 } 279 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
291 }
292 spin_unlock(&local->queue_stop_reason_lock);
293
294 if (qn < 0) {
295#ifdef CONFIG_MAC80211_HT_DEBUG
296 printk(KERN_DEBUG "BA request denied - "
297 "queue unavailable for tid %d\n", tid);
298#endif /* CONFIG_MAC80211_HT_DEBUG */
299 ret = -ENOSPC;
300 goto err_unlock_sta;
301 }
302
303 /*
304 * If we successfully allocate the session, we can't have
305 * anything going on on the queue this TID maps into, so
306 * stop it for now. This is a "virtual" stop using the same
307 * mechanism that drivers will use.
308 *
309 * XXX: queue up frames for this session in the sta_info
310 * struct instead to avoid hitting all other STAs.
311 */
312 ieee80211_stop_queue_by_reason(
313 &local->hw, hw->queues + qn,
314 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
315 }
316 280
317 /* prepare A-MPDU MLME for Tx aggregation */ 281 /* prepare A-MPDU MLME for Tx aggregation */
318 sta->ampdu_mlme.tid_tx[tid] = 282 sta->ampdu_mlme.tid_tx[tid] =
@@ -324,9 +288,11 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
324 tid); 288 tid);
325#endif 289#endif
326 ret = -ENOMEM; 290 ret = -ENOMEM;
327 goto err_return_queue; 291 goto err_wake_queue;
328 } 292 }
329 293
294 skb_queue_head_init(&sta->ampdu_mlme.tid_tx[tid]->pending);
295
330 /* Tx timer */ 296 /* Tx timer */
331 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = 297 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
332 sta_addba_resp_timer_expired; 298 sta_addba_resp_timer_expired;
@@ -351,8 +317,13 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
351 *state = HT_AGG_STATE_IDLE; 317 *state = HT_AGG_STATE_IDLE;
352 goto err_free; 318 goto err_free;
353 } 319 }
354 sta->tid_to_tx_q[tid] = qn;
355 320
321 /* Driver vetoed or OKed, but we can take packets again now */
322 ieee80211_wake_queue_by_reason(
323 &local->hw, ieee80211_ac_from_tid(tid),
324 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
325
326 spin_unlock(&local->ampdu_lock);
356 spin_unlock_bh(&sta->lock); 327 spin_unlock_bh(&sta->lock);
357 328
358 /* send an addBA request */ 329 /* send an addBA request */
@@ -377,17 +348,12 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
377 err_free: 348 err_free:
378 kfree(sta->ampdu_mlme.tid_tx[tid]); 349 kfree(sta->ampdu_mlme.tid_tx[tid]);
379 sta->ampdu_mlme.tid_tx[tid] = NULL; 350 sta->ampdu_mlme.tid_tx[tid] = NULL;
380 err_return_queue: 351 err_wake_queue:
381 if (qn >= 0) { 352 ieee80211_wake_queue_by_reason(
382 /* We failed, so start queue again right away. */ 353 &local->hw, ieee80211_ac_from_tid(tid),
383 ieee80211_wake_queue_by_reason(hw, hw->queues + qn, 354 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
384 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
385 /* give queue back to pool */
386 spin_lock(&local->queue_stop_reason_lock);
387 local->ampdu_ac_queue[qn] = -1;
388 spin_unlock(&local->queue_stop_reason_lock);
389 }
390 err_unlock_sta: 355 err_unlock_sta:
356 spin_unlock(&local->ampdu_lock);
391 spin_unlock_bh(&sta->lock); 357 spin_unlock_bh(&sta->lock);
392 unlock: 358 unlock:
393 rcu_read_unlock(); 359 rcu_read_unlock();
@@ -395,6 +361,67 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
395} 361}
396EXPORT_SYMBOL(ieee80211_start_tx_ba_session); 362EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
397 363
364/*
365 * splice packets from the STA's pending to the local pending,
366 * requires a call to ieee80211_agg_splice_finish and holding
367 * local->ampdu_lock across both calls.
368 */
369static void ieee80211_agg_splice_packets(struct ieee80211_local *local,
370 struct sta_info *sta, u16 tid)
371{
372 unsigned long flags;
373 u16 queue = ieee80211_ac_from_tid(tid);
374
375 ieee80211_stop_queue_by_reason(
376 &local->hw, queue,
377 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
378
379 if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) {
380 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
381 /* mark queue as pending, it is stopped already */
382 __set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING,
383 &local->queue_stop_reasons[queue]);
384 /* copy over remaining packets */
385 skb_queue_splice_tail_init(
386 &sta->ampdu_mlme.tid_tx[tid]->pending,
387 &local->pending[queue]);
388 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
389 }
390}
391
392static void ieee80211_agg_splice_finish(struct ieee80211_local *local,
393 struct sta_info *sta, u16 tid)
394{
395 u16 queue = ieee80211_ac_from_tid(tid);
396
397 ieee80211_wake_queue_by_reason(
398 &local->hw, queue,
399 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
400}
401
402/* caller must hold sta->lock */
403static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
404 struct sta_info *sta, u16 tid)
405{
406#ifdef CONFIG_MAC80211_HT_DEBUG
407 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
408#endif
409
410 spin_lock(&local->ampdu_lock);
411 ieee80211_agg_splice_packets(local, sta, tid);
412 /*
413 * NB: we rely on sta->lock being taken in the TX
414 * processing here when adding to the pending queue,
415 * otherwise we could only change the state of the
416 * session to OPERATIONAL _here_.
417 */
418 ieee80211_agg_splice_finish(local, sta, tid);
419 spin_unlock(&local->ampdu_lock);
420
421 local->ops->ampdu_action(&local->hw, IEEE80211_AMPDU_TX_OPERATIONAL,
422 &sta->sta, tid, NULL);
423}
424
398void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid) 425void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
399{ 426{
400 struct ieee80211_local *local = hw_to_local(hw); 427 struct ieee80211_local *local = hw_to_local(hw);
@@ -437,20 +464,8 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
437 464
438 *state |= HT_ADDBA_DRV_READY_MSK; 465 *state |= HT_ADDBA_DRV_READY_MSK;
439 466
440 if (*state == HT_AGG_STATE_OPERATIONAL) { 467 if (*state == HT_AGG_STATE_OPERATIONAL)
441#ifdef CONFIG_MAC80211_HT_DEBUG 468 ieee80211_agg_tx_operational(local, sta, tid);
442 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
443#endif
444 if (hw->ampdu_queues) {
445 /*
446 * Wake up this queue, we stopped it earlier,
447 * this will in turn wake the entire AC.
448 */
449 ieee80211_wake_queue_by_reason(hw,
450 hw->queues + sta->tid_to_tx_q[tid],
451 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
452 }
453 }
454 469
455 out: 470 out:
456 spin_unlock_bh(&sta->lock); 471 spin_unlock_bh(&sta->lock);
@@ -584,22 +599,19 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
584 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); 599 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
585 600
586 spin_lock_bh(&sta->lock); 601 spin_lock_bh(&sta->lock);
602 spin_lock(&local->ampdu_lock);
587 603
588 if (*state & HT_AGG_STATE_INITIATOR_MSK && 604 ieee80211_agg_splice_packets(local, sta, tid);
589 hw->ampdu_queues) {
590 /*
591 * Wake up this queue, we stopped it earlier,
592 * this will in turn wake the entire AC.
593 */
594 ieee80211_wake_queue_by_reason(hw,
595 hw->queues + sta->tid_to_tx_q[tid],
596 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
597 }
598 605
599 *state = HT_AGG_STATE_IDLE; 606 *state = HT_AGG_STATE_IDLE;
607 /* from now on packets are no longer put onto sta->pending */
600 sta->ampdu_mlme.addba_req_num[tid] = 0; 608 sta->ampdu_mlme.addba_req_num[tid] = 0;
601 kfree(sta->ampdu_mlme.tid_tx[tid]); 609 kfree(sta->ampdu_mlme.tid_tx[tid]);
602 sta->ampdu_mlme.tid_tx[tid] = NULL; 610 sta->ampdu_mlme.tid_tx[tid] = NULL;
611
612 ieee80211_agg_splice_finish(local, sta, tid);
613
614 spin_unlock(&local->ampdu_lock);
603 spin_unlock_bh(&sta->lock); 615 spin_unlock_bh(&sta->lock);
604 616
605 rcu_read_unlock(); 617 rcu_read_unlock();
@@ -637,9 +649,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
637 struct ieee80211_mgmt *mgmt, 649 struct ieee80211_mgmt *mgmt,
638 size_t len) 650 size_t len)
639{ 651{
640 struct ieee80211_hw *hw = &local->hw; 652 u16 capab, tid;
641 u16 capab;
642 u16 tid, start_seq_num;
643 u8 *state; 653 u8 *state;
644 654
645 capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); 655 capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
@@ -673,26 +683,10 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
673 683
674 *state |= HT_ADDBA_RECEIVED_MSK; 684 *state |= HT_ADDBA_RECEIVED_MSK;
675 685
676 if (hw->ampdu_queues && *state != curstate && 686 if (*state != curstate && *state == HT_AGG_STATE_OPERATIONAL)
677 *state == HT_AGG_STATE_OPERATIONAL) { 687 ieee80211_agg_tx_operational(local, sta, tid);
678 /*
679 * Wake up this queue, we stopped it earlier,
680 * this will in turn wake the entire AC.
681 */
682 ieee80211_wake_queue_by_reason(hw,
683 hw->queues + sta->tid_to_tx_q[tid],
684 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
685 }
686 sta->ampdu_mlme.addba_req_num[tid] = 0;
687 688
688 if (local->ops->ampdu_action) { 689 sta->ampdu_mlme.addba_req_num[tid] = 0;
689 (void)local->ops->ampdu_action(hw,
690 IEEE80211_AMPDU_TX_RESUME,
691 &sta->sta, tid, &start_seq_num);
692 }
693#ifdef CONFIG_MAC80211_HT_DEBUG
694 printk(KERN_DEBUG "Resuming TX aggregation for tid %d\n", tid);
695#endif /* CONFIG_MAC80211_HT_DEBUG */
696 } else { 690 } else {
697 sta->ampdu_mlme.addba_req_num[tid]++; 691 sta->ampdu_mlme.addba_req_num[tid]++;
698 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); 692 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 58693e52d458..e677b751d468 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -540,9 +540,6 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
540 540
541 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 541 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
542 542
543 if (sdata->vif.type != NL80211_IFTYPE_AP)
544 return -EINVAL;
545
546 old = sdata->u.ap.beacon; 543 old = sdata->u.ap.beacon;
547 544
548 if (old) 545 if (old)
@@ -559,9 +556,6 @@ static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev,
559 556
560 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 557 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
561 558
562 if (sdata->vif.type != NL80211_IFTYPE_AP)
563 return -EINVAL;
564
565 old = sdata->u.ap.beacon; 559 old = sdata->u.ap.beacon;
566 560
567 if (!old) 561 if (!old)
@@ -577,9 +571,6 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
577 571
578 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 572 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
579 573
580 if (sdata->vif.type != NL80211_IFTYPE_AP)
581 return -EINVAL;
582
583 old = sdata->u.ap.beacon; 574 old = sdata->u.ap.beacon;
584 575
585 if (!old) 576 if (!old)
@@ -728,10 +719,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
728 int err; 719 int err;
729 int layer2_update; 720 int layer2_update;
730 721
731 /* Prevent a race with changing the rate control algorithm */
732 if (!netif_running(dev))
733 return -ENETDOWN;
734
735 if (params->vlan) { 722 if (params->vlan) {
736 sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); 723 sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
737 724
@@ -860,14 +847,8 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
860 struct sta_info *sta; 847 struct sta_info *sta;
861 int err; 848 int err;
862 849
863 if (!netif_running(dev))
864 return -ENETDOWN;
865
866 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 850 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
867 851
868 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
869 return -ENOTSUPP;
870
871 rcu_read_lock(); 852 rcu_read_lock();
872 sta = sta_info_get(local, next_hop); 853 sta = sta_info_get(local, next_hop);
873 if (!sta) { 854 if (!sta) {
@@ -913,14 +894,8 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
913 struct mesh_path *mpath; 894 struct mesh_path *mpath;
914 struct sta_info *sta; 895 struct sta_info *sta;
915 896
916 if (!netif_running(dev))
917 return -ENETDOWN;
918
919 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 897 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
920 898
921 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
922 return -ENOTSUPP;
923
924 rcu_read_lock(); 899 rcu_read_lock();
925 900
926 sta = sta_info_get(local, next_hop); 901 sta = sta_info_get(local, next_hop);
@@ -989,9 +964,6 @@ static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev,
989 964
990 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 965 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
991 966
992 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
993 return -ENOTSUPP;
994
995 rcu_read_lock(); 967 rcu_read_lock();
996 mpath = mesh_path_lookup(dst, sdata); 968 mpath = mesh_path_lookup(dst, sdata);
997 if (!mpath) { 969 if (!mpath) {
@@ -1013,9 +985,6 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
1013 985
1014 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 986 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1015 987
1016 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
1017 return -ENOTSUPP;
1018
1019 rcu_read_lock(); 988 rcu_read_lock();
1020 mpath = mesh_path_lookup_by_idx(idx, sdata); 989 mpath = mesh_path_lookup_by_idx(idx, sdata);
1021 if (!mpath) { 990 if (!mpath) {
@@ -1035,8 +1004,6 @@ static int ieee80211_get_mesh_params(struct wiphy *wiphy,
1035 struct ieee80211_sub_if_data *sdata; 1004 struct ieee80211_sub_if_data *sdata;
1036 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1005 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1037 1006
1038 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
1039 return -ENOTSUPP;
1040 memcpy(conf, &(sdata->u.mesh.mshcfg), sizeof(struct mesh_config)); 1007 memcpy(conf, &(sdata->u.mesh.mshcfg), sizeof(struct mesh_config));
1041 return 0; 1008 return 0;
1042} 1009}
@@ -1054,9 +1021,6 @@ static int ieee80211_set_mesh_params(struct wiphy *wiphy,
1054 struct ieee80211_sub_if_data *sdata; 1021 struct ieee80211_sub_if_data *sdata;
1055 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1022 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1056 1023
1057 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
1058 return -ENOTSUPP;
1059
1060 /* Set the config options which we are interested in setting */ 1024 /* Set the config options which we are interested in setting */
1061 conf = &(sdata->u.mesh.mshcfg); 1025 conf = &(sdata->u.mesh.mshcfg);
1062 if (_chg_mesh_attr(NL80211_MESHCONF_RETRY_TIMEOUT, mask)) 1026 if (_chg_mesh_attr(NL80211_MESHCONF_RETRY_TIMEOUT, mask))
@@ -1104,9 +1068,6 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
1104 1068
1105 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1069 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1106 1070
1107 if (sdata->vif.type != NL80211_IFTYPE_AP)
1108 return -EINVAL;
1109
1110 if (params->use_cts_prot >= 0) { 1071 if (params->use_cts_prot >= 0) {
1111 sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot; 1072 sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot;
1112 changed |= BSS_CHANGED_ERP_CTS_PROT; 1073 changed |= BSS_CHANGED_ERP_CTS_PROT;
@@ -1181,91 +1142,6 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
1181 return ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 1142 return ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
1182} 1143}
1183 1144
1184static int set_mgmt_extra_ie_sta(struct ieee80211_sub_if_data *sdata,
1185 u8 subtype, u8 *ies, size_t ies_len)
1186{
1187 struct ieee80211_local *local = sdata->local;
1188 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1189
1190 switch (subtype) {
1191 case IEEE80211_STYPE_PROBE_REQ >> 4:
1192 if (local->ops->hw_scan)
1193 break;
1194 kfree(ifmgd->ie_probereq);
1195 ifmgd->ie_probereq = ies;
1196 ifmgd->ie_probereq_len = ies_len;
1197 return 0;
1198 case IEEE80211_STYPE_PROBE_RESP >> 4:
1199 kfree(ifmgd->ie_proberesp);
1200 ifmgd->ie_proberesp = ies;
1201 ifmgd->ie_proberesp_len = ies_len;
1202 return 0;
1203 case IEEE80211_STYPE_AUTH >> 4:
1204 kfree(ifmgd->ie_auth);
1205 ifmgd->ie_auth = ies;
1206 ifmgd->ie_auth_len = ies_len;
1207 return 0;
1208 case IEEE80211_STYPE_ASSOC_REQ >> 4:
1209 kfree(ifmgd->ie_assocreq);
1210 ifmgd->ie_assocreq = ies;
1211 ifmgd->ie_assocreq_len = ies_len;
1212 return 0;
1213 case IEEE80211_STYPE_REASSOC_REQ >> 4:
1214 kfree(ifmgd->ie_reassocreq);
1215 ifmgd->ie_reassocreq = ies;
1216 ifmgd->ie_reassocreq_len = ies_len;
1217 return 0;
1218 case IEEE80211_STYPE_DEAUTH >> 4:
1219 kfree(ifmgd->ie_deauth);
1220 ifmgd->ie_deauth = ies;
1221 ifmgd->ie_deauth_len = ies_len;
1222 return 0;
1223 case IEEE80211_STYPE_DISASSOC >> 4:
1224 kfree(ifmgd->ie_disassoc);
1225 ifmgd->ie_disassoc = ies;
1226 ifmgd->ie_disassoc_len = ies_len;
1227 return 0;
1228 }
1229
1230 return -EOPNOTSUPP;
1231}
1232
1233static int ieee80211_set_mgmt_extra_ie(struct wiphy *wiphy,
1234 struct net_device *dev,
1235 struct mgmt_extra_ie_params *params)
1236{
1237 struct ieee80211_sub_if_data *sdata;
1238 u8 *ies;
1239 size_t ies_len;
1240 int ret = -EOPNOTSUPP;
1241
1242 if (params->ies) {
1243 ies = kmemdup(params->ies, params->ies_len, GFP_KERNEL);
1244 if (ies == NULL)
1245 return -ENOMEM;
1246 ies_len = params->ies_len;
1247 } else {
1248 ies = NULL;
1249 ies_len = 0;
1250 }
1251
1252 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1253
1254 switch (sdata->vif.type) {
1255 case NL80211_IFTYPE_STATION:
1256 ret = set_mgmt_extra_ie_sta(sdata, params->subtype,
1257 ies, ies_len);
1258 break;
1259 default:
1260 ret = -EOPNOTSUPP;
1261 break;
1262 }
1263
1264 if (ret)
1265 kfree(ies);
1266 return ret;
1267}
1268
1269#ifdef CONFIG_PM 1145#ifdef CONFIG_PM
1270static int ieee80211_suspend(struct wiphy *wiphy) 1146static int ieee80211_suspend(struct wiphy *wiphy)
1271{ 1147{
@@ -1287,9 +1163,6 @@ static int ieee80211_scan(struct wiphy *wiphy,
1287{ 1163{
1288 struct ieee80211_sub_if_data *sdata; 1164 struct ieee80211_sub_if_data *sdata;
1289 1165
1290 if (!netif_running(dev))
1291 return -ENETDOWN;
1292
1293 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1166 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1294 1167
1295 if (sdata->vif.type != NL80211_IFTYPE_STATION && 1168 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
@@ -1300,6 +1173,119 @@ static int ieee80211_scan(struct wiphy *wiphy,
1300 return ieee80211_request_scan(sdata, req); 1173 return ieee80211_request_scan(sdata, req);
1301} 1174}
1302 1175
1176static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev,
1177 struct cfg80211_auth_request *req)
1178{
1179 struct ieee80211_sub_if_data *sdata;
1180
1181 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1182
1183 switch (req->auth_type) {
1184 case NL80211_AUTHTYPE_OPEN_SYSTEM:
1185 sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_OPEN;
1186 break;
1187 case NL80211_AUTHTYPE_SHARED_KEY:
1188 sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_SHARED_KEY;
1189 break;
1190 case NL80211_AUTHTYPE_FT:
1191 sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_FT;
1192 break;
1193 case NL80211_AUTHTYPE_NETWORK_EAP:
1194 sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_LEAP;
1195 break;
1196 default:
1197 return -EOPNOTSUPP;
1198 }
1199
1200 memcpy(sdata->u.mgd.bssid, req->peer_addr, ETH_ALEN);
1201 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
1202 sdata->u.mgd.flags |= IEEE80211_STA_BSSID_SET;
1203
1204 /* TODO: req->chan */
1205 sdata->u.mgd.flags |= IEEE80211_STA_AUTO_CHANNEL_SEL;
1206
1207 if (req->ssid) {
1208 sdata->u.mgd.flags |= IEEE80211_STA_SSID_SET;
1209 memcpy(sdata->u.mgd.ssid, req->ssid, req->ssid_len);
1210 sdata->u.mgd.ssid_len = req->ssid_len;
1211 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
1212 }
1213
1214 kfree(sdata->u.mgd.sme_auth_ie);
1215 sdata->u.mgd.sme_auth_ie = NULL;
1216 sdata->u.mgd.sme_auth_ie_len = 0;
1217 if (req->ie) {
1218 sdata->u.mgd.sme_auth_ie = kmalloc(req->ie_len, GFP_KERNEL);
1219 if (sdata->u.mgd.sme_auth_ie == NULL)
1220 return -ENOMEM;
1221 memcpy(sdata->u.mgd.sme_auth_ie, req->ie, req->ie_len);
1222 sdata->u.mgd.sme_auth_ie_len = req->ie_len;
1223 }
1224
1225 sdata->u.mgd.flags |= IEEE80211_STA_EXT_SME;
1226 sdata->u.mgd.state = IEEE80211_STA_MLME_DIRECT_PROBE;
1227 ieee80211_sta_req_auth(sdata);
1228 return 0;
1229}
1230
1231static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev,
1232 struct cfg80211_assoc_request *req)
1233{
1234 struct ieee80211_sub_if_data *sdata;
1235 int ret;
1236
1237 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1238
1239 if (memcmp(sdata->u.mgd.bssid, req->peer_addr, ETH_ALEN) != 0 ||
1240 !(sdata->u.mgd.flags & IEEE80211_STA_AUTHENTICATED))
1241 return -ENOLINK; /* not authenticated */
1242
1243 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
1244 sdata->u.mgd.flags |= IEEE80211_STA_BSSID_SET;
1245
1246 /* TODO: req->chan */
1247 sdata->u.mgd.flags |= IEEE80211_STA_AUTO_CHANNEL_SEL;
1248
1249 if (req->ssid) {
1250 sdata->u.mgd.flags |= IEEE80211_STA_SSID_SET;
1251 memcpy(sdata->u.mgd.ssid, req->ssid, req->ssid_len);
1252 sdata->u.mgd.ssid_len = req->ssid_len;
1253 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
1254 } else
1255 sdata->u.mgd.flags |= IEEE80211_STA_AUTO_SSID_SEL;
1256
1257 ret = ieee80211_sta_set_extra_ie(sdata, req->ie, req->ie_len);
1258 if (ret)
1259 return ret;
1260
1261 sdata->u.mgd.flags |= IEEE80211_STA_EXT_SME;
1262 sdata->u.mgd.state = IEEE80211_STA_MLME_ASSOCIATE;
1263 ieee80211_sta_req_auth(sdata);
1264 return 0;
1265}
1266
1267static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev,
1268 struct cfg80211_deauth_request *req)
1269{
1270 struct ieee80211_sub_if_data *sdata;
1271
1272 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1273
1274 /* TODO: req->ie */
1275 return ieee80211_sta_deauthenticate(sdata, req->reason_code);
1276}
1277
1278static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev,
1279 struct cfg80211_disassoc_request *req)
1280{
1281 struct ieee80211_sub_if_data *sdata;
1282
1283 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1284
1285 /* TODO: req->ie */
1286 return ieee80211_sta_disassociate(sdata, req->reason_code);
1287}
1288
1303struct cfg80211_ops mac80211_config_ops = { 1289struct cfg80211_ops mac80211_config_ops = {
1304 .add_virtual_intf = ieee80211_add_iface, 1290 .add_virtual_intf = ieee80211_add_iface,
1305 .del_virtual_intf = ieee80211_del_iface, 1291 .del_virtual_intf = ieee80211_del_iface,
@@ -1329,8 +1315,11 @@ struct cfg80211_ops mac80211_config_ops = {
1329 .change_bss = ieee80211_change_bss, 1315 .change_bss = ieee80211_change_bss,
1330 .set_txq_params = ieee80211_set_txq_params, 1316 .set_txq_params = ieee80211_set_txq_params,
1331 .set_channel = ieee80211_set_channel, 1317 .set_channel = ieee80211_set_channel,
1332 .set_mgmt_extra_ie = ieee80211_set_mgmt_extra_ie,
1333 .suspend = ieee80211_suspend, 1318 .suspend = ieee80211_suspend,
1334 .resume = ieee80211_resume, 1319 .resume = ieee80211_resume,
1335 .scan = ieee80211_scan, 1320 .scan = ieee80211_scan,
1321 .auth = ieee80211_auth,
1322 .assoc = ieee80211_assoc,
1323 .deauth = ieee80211_deauth,
1324 .disassoc = ieee80211_disassoc,
1336}; 1325};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index e37f557de3f3..210b9b6fecd2 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -40,6 +40,10 @@ static const struct file_operations name## _ops = { \
40 local->debugfs.name = debugfs_create_file(#name, 0400, phyd, \ 40 local->debugfs.name = debugfs_create_file(#name, 0400, phyd, \
41 local, &name## _ops); 41 local, &name## _ops);
42 42
43#define DEBUGFS_ADD_MODE(name, mode) \
44 local->debugfs.name = debugfs_create_file(#name, mode, phyd, \
45 local, &name## _ops);
46
43#define DEBUGFS_DEL(name) \ 47#define DEBUGFS_DEL(name) \
44 debugfs_remove(local->debugfs.name); \ 48 debugfs_remove(local->debugfs.name); \
45 local->debugfs.name = NULL; 49 local->debugfs.name = NULL;
@@ -113,6 +117,24 @@ static const struct file_operations tsf_ops = {
113 .open = mac80211_open_file_generic 117 .open = mac80211_open_file_generic
114}; 118};
115 119
120static ssize_t reset_write(struct file *file, const char __user *user_buf,
121 size_t count, loff_t *ppos)
122{
123 struct ieee80211_local *local = file->private_data;
124
125 rtnl_lock();
126 __ieee80211_suspend(&local->hw);
127 __ieee80211_resume(&local->hw);
128 rtnl_unlock();
129
130 return count;
131}
132
133static const struct file_operations reset_ops = {
134 .write = reset_write,
135 .open = mac80211_open_file_generic,
136};
137
116/* statistics stuff */ 138/* statistics stuff */
117 139
118#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \ 140#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \
@@ -254,6 +276,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
254 DEBUGFS_ADD(total_ps_buffered); 276 DEBUGFS_ADD(total_ps_buffered);
255 DEBUGFS_ADD(wep_iv); 277 DEBUGFS_ADD(wep_iv);
256 DEBUGFS_ADD(tsf); 278 DEBUGFS_ADD(tsf);
279 DEBUGFS_ADD_MODE(reset, 0200);
257 280
258 statsd = debugfs_create_dir("statistics", phyd); 281 statsd = debugfs_create_dir("statistics", phyd);
259 local->debugfs.statistics = statsd; 282 local->debugfs.statistics = statsd;
@@ -308,6 +331,7 @@ void debugfs_hw_del(struct ieee80211_local *local)
308 DEBUGFS_DEL(total_ps_buffered); 331 DEBUGFS_DEL(total_ps_buffered);
309 DEBUGFS_DEL(wep_iv); 332 DEBUGFS_DEL(wep_iv);
310 DEBUGFS_DEL(tsf); 333 DEBUGFS_DEL(tsf);
334 DEBUGFS_DEL(reset);
311 335
312 DEBUGFS_STATS_DEL(transmitted_fragment_count); 336 DEBUGFS_STATS_DEL(transmitted_fragment_count);
313 DEBUGFS_STATS_DEL(multicast_transmitted_frame_count); 337 DEBUGFS_STATS_DEL(multicast_transmitted_frame_count);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index f4becc12904e..3201e1f96365 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -812,8 +812,9 @@ int ieee80211_ibss_commit(struct ieee80211_sub_if_data *sdata)
812 812
813 ifibss->ibss_join_req = jiffies; 813 ifibss->ibss_join_req = jiffies;
814 ifibss->state = IEEE80211_IBSS_MLME_SEARCH; 814 ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
815 set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request);
815 816
816 return ieee80211_sta_find_ibss(sdata); 817 return 0;
817} 818}
818 819
819int ieee80211_ibss_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len) 820int ieee80211_ibss_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index fbb91f1aebb2..e6ed78cb16b3 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -149,11 +149,6 @@ struct ieee80211_tx_data {
149 149
150 struct ieee80211_channel *channel; 150 struct ieee80211_channel *channel;
151 151
152 /* Extra fragments (in addition to the first fragment
153 * in skb) */
154 struct sk_buff **extra_frag;
155 int num_extra_frag;
156
157 u16 ethertype; 152 u16 ethertype;
158 unsigned int flags; 153 unsigned int flags;
159}; 154};
@@ -189,12 +184,6 @@ struct ieee80211_rx_data {
189 u16 tkip_iv16; 184 u16 tkip_iv16;
190}; 185};
191 186
192struct ieee80211_tx_stored_packet {
193 struct sk_buff *skb;
194 struct sk_buff **extra_frag;
195 int num_extra_frag;
196};
197
198struct beacon_data { 187struct beacon_data {
199 u8 *head, *tail; 188 u8 *head, *tail;
200 int head_len, tail_len; 189 int head_len, tail_len;
@@ -247,8 +236,9 @@ struct mesh_preq_queue {
247#define IEEE80211_STA_ASSOCIATED BIT(4) 236#define IEEE80211_STA_ASSOCIATED BIT(4)
248#define IEEE80211_STA_PROBEREQ_POLL BIT(5) 237#define IEEE80211_STA_PROBEREQ_POLL BIT(5)
249#define IEEE80211_STA_CREATE_IBSS BIT(6) 238#define IEEE80211_STA_CREATE_IBSS BIT(6)
250#define IEEE80211_STA_MIXED_CELL BIT(7) 239/* hole at 7, please re-use */
251#define IEEE80211_STA_WMM_ENABLED BIT(8) 240#define IEEE80211_STA_WMM_ENABLED BIT(8)
241/* hole at 9, please re-use */
252#define IEEE80211_STA_AUTO_SSID_SEL BIT(10) 242#define IEEE80211_STA_AUTO_SSID_SEL BIT(10)
253#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11) 243#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11)
254#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12) 244#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12)
@@ -256,6 +246,7 @@ struct mesh_preq_queue {
256#define IEEE80211_STA_TKIP_WEP_USED BIT(14) 246#define IEEE80211_STA_TKIP_WEP_USED BIT(14)
257#define IEEE80211_STA_CSA_RECEIVED BIT(15) 247#define IEEE80211_STA_CSA_RECEIVED BIT(15)
258#define IEEE80211_STA_MFP_ENABLED BIT(16) 248#define IEEE80211_STA_MFP_ENABLED BIT(16)
249#define IEEE80211_STA_EXT_SME BIT(17)
259/* flags for MLME request */ 250/* flags for MLME request */
260#define IEEE80211_STA_REQ_SCAN 0 251#define IEEE80211_STA_REQ_SCAN 0
261#define IEEE80211_STA_REQ_DIRECT_PROBE 1 252#define IEEE80211_STA_REQ_DIRECT_PROBE 1
@@ -266,12 +257,14 @@ struct mesh_preq_queue {
266#define IEEE80211_AUTH_ALG_OPEN BIT(0) 257#define IEEE80211_AUTH_ALG_OPEN BIT(0)
267#define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1) 258#define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1)
268#define IEEE80211_AUTH_ALG_LEAP BIT(2) 259#define IEEE80211_AUTH_ALG_LEAP BIT(2)
260#define IEEE80211_AUTH_ALG_FT BIT(3)
269 261
270struct ieee80211_if_managed { 262struct ieee80211_if_managed {
271 struct timer_list timer; 263 struct timer_list timer;
272 struct timer_list chswitch_timer; 264 struct timer_list chswitch_timer;
273 struct work_struct work; 265 struct work_struct work;
274 struct work_struct chswitch_work; 266 struct work_struct chswitch_work;
267 struct work_struct beacon_loss_work;
275 268
276 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; 269 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
277 270
@@ -305,6 +298,7 @@ struct ieee80211_if_managed {
305 unsigned long request; 298 unsigned long request;
306 299
307 unsigned long last_probe; 300 unsigned long last_probe;
301 unsigned long last_beacon;
308 302
309 unsigned int flags; 303 unsigned int flags;
310 304
@@ -321,20 +315,8 @@ struct ieee80211_if_managed {
321 int wmm_last_param_set; 315 int wmm_last_param_set;
322 316
323 /* Extra IE data for management frames */ 317 /* Extra IE data for management frames */
324 u8 *ie_probereq; 318 u8 *sme_auth_ie;
325 size_t ie_probereq_len; 319 size_t sme_auth_ie_len;
326 u8 *ie_proberesp;
327 size_t ie_proberesp_len;
328 u8 *ie_auth;
329 size_t ie_auth_len;
330 u8 *ie_assocreq;
331 size_t ie_assocreq_len;
332 u8 *ie_reassocreq;
333 size_t ie_reassocreq_len;
334 u8 *ie_deauth;
335 size_t ie_deauth_len;
336 u8 *ie_disassoc;
337 size_t ie_disassoc_len;
338}; 320};
339 321
340enum ieee80211_ibss_flags { 322enum ieee80211_ibss_flags {
@@ -421,7 +403,6 @@ struct ieee80211_if_mesh {
421 * 403 *
422 * @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets 404 * @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets
423 * @IEEE80211_SDATA_PROMISC: interface is promisc 405 * @IEEE80211_SDATA_PROMISC: interface is promisc
424 * @IEEE80211_SDATA_USERSPACE_MLME: userspace MLME is active
425 * @IEEE80211_SDATA_OPERATING_GMODE: operating in G-only mode 406 * @IEEE80211_SDATA_OPERATING_GMODE: operating in G-only mode
426 * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between 407 * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between
427 * associated stations and deliver multicast frames both 408 * associated stations and deliver multicast frames both
@@ -430,9 +411,8 @@ struct ieee80211_if_mesh {
430enum ieee80211_sub_if_data_flags { 411enum ieee80211_sub_if_data_flags {
431 IEEE80211_SDATA_ALLMULTI = BIT(0), 412 IEEE80211_SDATA_ALLMULTI = BIT(0),
432 IEEE80211_SDATA_PROMISC = BIT(1), 413 IEEE80211_SDATA_PROMISC = BIT(1),
433 IEEE80211_SDATA_USERSPACE_MLME = BIT(2), 414 IEEE80211_SDATA_OPERATING_GMODE = BIT(2),
434 IEEE80211_SDATA_OPERATING_GMODE = BIT(3), 415 IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3),
435 IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(4),
436}; 416};
437 417
438struct ieee80211_sub_if_data { 418struct ieee80211_sub_if_data {
@@ -598,6 +578,8 @@ enum queue_stop_reason {
598 IEEE80211_QUEUE_STOP_REASON_PS, 578 IEEE80211_QUEUE_STOP_REASON_PS,
599 IEEE80211_QUEUE_STOP_REASON_CSA, 579 IEEE80211_QUEUE_STOP_REASON_CSA,
600 IEEE80211_QUEUE_STOP_REASON_AGGREGATION, 580 IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
581 IEEE80211_QUEUE_STOP_REASON_SUSPEND,
582 IEEE80211_QUEUE_STOP_REASON_PENDING,
601}; 583};
602 584
603struct ieee80211_master_priv { 585struct ieee80211_master_priv {
@@ -612,12 +594,7 @@ struct ieee80211_local {
612 594
613 const struct ieee80211_ops *ops; 595 const struct ieee80211_ops *ops;
614 596
615 /* AC queue corresponding to each AMPDU queue */ 597 unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES];
616 s8 ampdu_ac_queue[IEEE80211_MAX_AMPDU_QUEUES];
617 unsigned int amdpu_ac_stop_refcnt[IEEE80211_MAX_AMPDU_QUEUES];
618
619 unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES +
620 IEEE80211_MAX_AMPDU_QUEUES];
621 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */ 598 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
622 spinlock_t queue_stop_reason_lock; 599 spinlock_t queue_stop_reason_lock;
623 600
@@ -654,11 +631,17 @@ struct ieee80211_local {
654 struct sta_info *sta_hash[STA_HASH_SIZE]; 631 struct sta_info *sta_hash[STA_HASH_SIZE];
655 struct timer_list sta_cleanup; 632 struct timer_list sta_cleanup;
656 633
657 unsigned long queues_pending[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)]; 634 struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
658 unsigned long queues_pending_run[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)];
659 struct ieee80211_tx_stored_packet pending_packet[IEEE80211_MAX_QUEUES];
660 struct tasklet_struct tx_pending_tasklet; 635 struct tasklet_struct tx_pending_tasklet;
661 636
637 /*
638 * This lock is used to prevent concurrent A-MPDU
639 * session start/stop processing, this thus also
640 * synchronises the ->ampdu_action() callback to
641 * drivers and limits it to one at a time.
642 */
643 spinlock_t ampdu_lock;
644
662 /* number of interfaces with corresponding IFF_ flags */ 645 /* number of interfaces with corresponding IFF_ flags */
663 atomic_t iff_allmultis, iff_promiscs; 646 atomic_t iff_allmultis, iff_promiscs;
664 647
@@ -774,6 +757,7 @@ struct ieee80211_local {
774 struct dentry *total_ps_buffered; 757 struct dentry *total_ps_buffered;
775 struct dentry *wep_iv; 758 struct dentry *wep_iv;
776 struct dentry *tsf; 759 struct dentry *tsf;
760 struct dentry *reset;
777 struct dentry *statistics; 761 struct dentry *statistics;
778 struct local_debugfsdentries_statsdentries { 762 struct local_debugfsdentries_statsdentries {
779 struct dentry *transmitted_fragment_count; 763 struct dentry *transmitted_fragment_count;
@@ -969,7 +953,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata,
969 struct sk_buff *skb, 953 struct sk_buff *skb,
970 struct ieee80211_rx_status *rx_status); 954 struct ieee80211_rx_status *rx_status);
971int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, 955int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
972 char *ie, size_t len); 956 const char *ie, size_t len);
973 957
974void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local); 958void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local);
975void ieee80211_scan_failed(struct ieee80211_local *local); 959void ieee80211_scan_failed(struct ieee80211_local *local);
@@ -1053,8 +1037,19 @@ void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
1053 u8 pwr_constr_elem_len); 1037 u8 pwr_constr_elem_len);
1054 1038
1055/* Suspend/resume */ 1039/* Suspend/resume */
1040#ifdef CONFIG_PM
1056int __ieee80211_suspend(struct ieee80211_hw *hw); 1041int __ieee80211_suspend(struct ieee80211_hw *hw);
1057int __ieee80211_resume(struct ieee80211_hw *hw); 1042int __ieee80211_resume(struct ieee80211_hw *hw);
1043#else
1044static inline int __ieee80211_suspend(struct ieee80211_hw *hw)
1045{
1046 return 0;
1047}
1048static inline int __ieee80211_resume(struct ieee80211_hw *hw)
1049{
1050 return 0;
1051}
1052#endif
1058 1053
1059/* utility functions/constants */ 1054/* utility functions/constants */
1060extern void *mac80211_wiphy_privid; /* for wiphy privid */ 1055extern void *mac80211_wiphy_privid; /* for wiphy privid */
@@ -1081,6 +1076,9 @@ void ieee80211_dynamic_ps_timer(unsigned long data);
1081void ieee80211_send_nullfunc(struct ieee80211_local *local, 1076void ieee80211_send_nullfunc(struct ieee80211_local *local,
1082 struct ieee80211_sub_if_data *sdata, 1077 struct ieee80211_sub_if_data *sdata,
1083 int powersave); 1078 int powersave);
1079void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
1080 struct ieee80211_hdr *hdr);
1081void ieee80211_beacon_loss_work(struct work_struct *work);
1084 1082
1085void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, 1083void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
1086 enum queue_stop_reason reason); 1084 enum queue_stop_reason reason);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index f9f27b9cadbe..91e8e1bacaaa 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -261,8 +261,7 @@ static int ieee80211_open(struct net_device *dev)
261 ieee80211_bss_info_change_notify(sdata, changed); 261 ieee80211_bss_info_change_notify(sdata, changed);
262 ieee80211_enable_keys(sdata); 262 ieee80211_enable_keys(sdata);
263 263
264 if (sdata->vif.type == NL80211_IFTYPE_STATION && 264 if (sdata->vif.type == NL80211_IFTYPE_STATION)
265 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
266 netif_carrier_off(dev); 265 netif_carrier_off(dev);
267 else 266 else
268 netif_carrier_on(dev); 267 netif_carrier_on(dev);
@@ -478,6 +477,9 @@ static int ieee80211_stop(struct net_device *dev)
478 */ 477 */
479 cancel_work_sync(&sdata->u.mgd.work); 478 cancel_work_sync(&sdata->u.mgd.work);
480 cancel_work_sync(&sdata->u.mgd.chswitch_work); 479 cancel_work_sync(&sdata->u.mgd.chswitch_work);
480
481 cancel_work_sync(&sdata->u.mgd.beacon_loss_work);
482
481 /* 483 /*
482 * When we get here, the interface is marked down. 484 * When we get here, the interface is marked down.
483 * Call synchronize_rcu() to wait for the RX path 485 * Call synchronize_rcu() to wait for the RX path
@@ -653,13 +655,7 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
653 kfree(sdata->u.mgd.extra_ie); 655 kfree(sdata->u.mgd.extra_ie);
654 kfree(sdata->u.mgd.assocreq_ies); 656 kfree(sdata->u.mgd.assocreq_ies);
655 kfree(sdata->u.mgd.assocresp_ies); 657 kfree(sdata->u.mgd.assocresp_ies);
656 kfree(sdata->u.mgd.ie_probereq); 658 kfree(sdata->u.mgd.sme_auth_ie);
657 kfree(sdata->u.mgd.ie_proberesp);
658 kfree(sdata->u.mgd.ie_auth);
659 kfree(sdata->u.mgd.ie_assocreq);
660 kfree(sdata->u.mgd.ie_reassocreq);
661 kfree(sdata->u.mgd.ie_deauth);
662 kfree(sdata->u.mgd.ie_disassoc);
663 break; 659 break;
664 case NL80211_IFTYPE_WDS: 660 case NL80211_IFTYPE_WDS:
665 case NL80211_IFTYPE_AP_VLAN: 661 case NL80211_IFTYPE_AP_VLAN:
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index f38db4d37e5d..a6f1d8a869bc 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -161,12 +161,6 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
161 if (WARN_ON(!netif_running(sdata->dev))) 161 if (WARN_ON(!netif_running(sdata->dev)))
162 return 0; 162 return 0;
163 163
164 if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
165 return -EINVAL;
166
167 if (!local->ops->config_interface)
168 return 0;
169
170 memset(&conf, 0, sizeof(conf)); 164 memset(&conf, 0, sizeof(conf));
171 165
172 if (sdata->vif.type == NL80211_IFTYPE_STATION) 166 if (sdata->vif.type == NL80211_IFTYPE_STATION)
@@ -183,6 +177,9 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
183 return -EINVAL; 177 return -EINVAL;
184 } 178 }
185 179
180 if (!local->ops->config_interface)
181 return 0;
182
186 switch (sdata->vif.type) { 183 switch (sdata->vif.type) {
187 case NL80211_IFTYPE_AP: 184 case NL80211_IFTYPE_AP:
188 case NL80211_IFTYPE_ADHOC: 185 case NL80211_IFTYPE_ADHOC:
@@ -224,9 +221,6 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
224 } 221 }
225 } 222 }
226 223
227 if (WARN_ON(!conf.bssid && (changed & IEEE80211_IFCC_BSSID)))
228 return -EINVAL;
229
230 conf.changed = changed; 224 conf.changed = changed;
231 225
232 return local->ops->config_interface(local_to_hw(local), 226 return local->ops->config_interface(local_to_hw(local),
@@ -780,13 +774,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
780 setup_timer(&local->dynamic_ps_timer, 774 setup_timer(&local->dynamic_ps_timer,
781 ieee80211_dynamic_ps_timer, (unsigned long) local); 775 ieee80211_dynamic_ps_timer, (unsigned long) local);
782 776
783 for (i = 0; i < IEEE80211_MAX_AMPDU_QUEUES; i++)
784 local->ampdu_ac_queue[i] = -1;
785 /* using an s8 won't work with more than that */
786 BUILD_BUG_ON(IEEE80211_MAX_AMPDU_QUEUES > 127);
787
788 sta_info_init(local); 777 sta_info_init(local);
789 778
779 for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
780 skb_queue_head_init(&local->pending[i]);
790 tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, 781 tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
791 (unsigned long)local); 782 (unsigned long)local);
792 tasklet_disable(&local->tx_pending_tasklet); 783 tasklet_disable(&local->tx_pending_tasklet);
@@ -799,6 +790,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
799 skb_queue_head_init(&local->skb_queue); 790 skb_queue_head_init(&local->skb_queue);
800 skb_queue_head_init(&local->skb_queue_unreliable); 791 skb_queue_head_init(&local->skb_queue_unreliable);
801 792
793 spin_lock_init(&local->ampdu_lock);
794
802 return local_to_hw(local); 795 return local_to_hw(local);
803} 796}
804EXPORT_SYMBOL(ieee80211_alloc_hw); 797EXPORT_SYMBOL(ieee80211_alloc_hw);
@@ -876,10 +869,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
876 */ 869 */
877 if (hw->queues > IEEE80211_MAX_QUEUES) 870 if (hw->queues > IEEE80211_MAX_QUEUES)
878 hw->queues = IEEE80211_MAX_QUEUES; 871 hw->queues = IEEE80211_MAX_QUEUES;
879 if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES)
880 hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES;
881 if (hw->queues < 4)
882 hw->ampdu_queues = 0;
883 872
884 mdev = alloc_netdev_mq(sizeof(struct ieee80211_master_priv), 873 mdev = alloc_netdev_mq(sizeof(struct ieee80211_master_priv),
885 "wmaster%d", ieee80211_master_setup, 874 "wmaster%d", ieee80211_master_setup,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 841b8450b3de..7ecda9d59d8a 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -30,7 +30,7 @@
30#define IEEE80211_ASSOC_TIMEOUT (HZ / 5) 30#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
31#define IEEE80211_ASSOC_MAX_TRIES 3 31#define IEEE80211_ASSOC_MAX_TRIES 3
32#define IEEE80211_MONITORING_INTERVAL (2 * HZ) 32#define IEEE80211_MONITORING_INTERVAL (2 * HZ)
33#define IEEE80211_PROBE_INTERVAL (60 * HZ) 33#define IEEE80211_PROBE_IDLE_TIME (60 * HZ)
34#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) 34#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ)
35 35
36/* utils */ 36/* utils */
@@ -82,38 +82,23 @@ static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
82 82
83/* frame sending functions */ 83/* frame sending functions */
84 84
85static void add_extra_ies(struct sk_buff *skb, u8 *ies, size_t ies_len)
86{
87 if (ies)
88 memcpy(skb_put(skb, ies_len), ies, ies_len);
89}
90
91static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) 85static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
92{ 86{
93 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 87 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
94 struct ieee80211_local *local = sdata->local; 88 struct ieee80211_local *local = sdata->local;
95 struct sk_buff *skb; 89 struct sk_buff *skb;
96 struct ieee80211_mgmt *mgmt; 90 struct ieee80211_mgmt *mgmt;
97 u8 *pos, *ies, *ht_ie, *e_ies; 91 u8 *pos, *ies, *ht_ie;
98 int i, len, count, rates_len, supp_rates_len; 92 int i, len, count, rates_len, supp_rates_len;
99 u16 capab; 93 u16 capab;
100 struct ieee80211_bss *bss; 94 struct ieee80211_bss *bss;
101 int wmm = 0; 95 int wmm = 0;
102 struct ieee80211_supported_band *sband; 96 struct ieee80211_supported_band *sband;
103 u32 rates = 0; 97 u32 rates = 0;
104 size_t e_ies_len;
105
106 if (ifmgd->flags & IEEE80211_IBSS_PREV_BSSID_SET) {
107 e_ies = sdata->u.mgd.ie_reassocreq;
108 e_ies_len = sdata->u.mgd.ie_reassocreq_len;
109 } else {
110 e_ies = sdata->u.mgd.ie_assocreq;
111 e_ies_len = sdata->u.mgd.ie_assocreq_len;
112 }
113 98
114 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 99 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
115 sizeof(*mgmt) + 200 + ifmgd->extra_ie_len + 100 sizeof(*mgmt) + 200 + ifmgd->extra_ie_len +
116 ifmgd->ssid_len + e_ies_len); 101 ifmgd->ssid_len);
117 if (!skb) { 102 if (!skb) {
118 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc " 103 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
119 "frame\n", sdata->dev->name); 104 "frame\n", sdata->dev->name);
@@ -304,8 +289,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
304 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); 289 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
305 } 290 }
306 291
307 add_extra_ies(skb, e_ies, e_ies_len);
308
309 kfree(ifmgd->assocreq_ies); 292 kfree(ifmgd->assocreq_ies);
310 ifmgd->assocreq_ies_len = (skb->data + skb->len) - ies; 293 ifmgd->assocreq_ies_len = (skb->data + skb->len) - ies;
311 ifmgd->assocreq_ies = kmalloc(ifmgd->assocreq_ies_len, GFP_KERNEL); 294 ifmgd->assocreq_ies = kmalloc(ifmgd->assocreq_ies_len, GFP_KERNEL);
@@ -323,19 +306,8 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
323 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 306 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
324 struct sk_buff *skb; 307 struct sk_buff *skb;
325 struct ieee80211_mgmt *mgmt; 308 struct ieee80211_mgmt *mgmt;
326 u8 *ies;
327 size_t ies_len;
328
329 if (stype == IEEE80211_STYPE_DEAUTH) {
330 ies = sdata->u.mgd.ie_deauth;
331 ies_len = sdata->u.mgd.ie_deauth_len;
332 } else {
333 ies = sdata->u.mgd.ie_disassoc;
334 ies_len = sdata->u.mgd.ie_disassoc_len;
335 }
336 309
337 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 310 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
338 ies_len);
339 if (!skb) { 311 if (!skb) {
340 printk(KERN_DEBUG "%s: failed to allocate buffer for " 312 printk(KERN_DEBUG "%s: failed to allocate buffer for "
341 "deauth/disassoc frame\n", sdata->dev->name); 313 "deauth/disassoc frame\n", sdata->dev->name);
@@ -353,8 +325,6 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
353 /* u.deauth.reason_code == u.disassoc.reason_code */ 325 /* u.deauth.reason_code == u.disassoc.reason_code */
354 mgmt->u.deauth.reason_code = cpu_to_le16(reason); 326 mgmt->u.deauth.reason_code = cpu_to_le16(reason);
355 327
356 add_extra_ies(skb, ies, ies_len);
357
358 ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED); 328 ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED);
359} 329}
360 330
@@ -640,6 +610,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
640 bss_info_changed |= ieee80211_handle_bss_capability(sdata, 610 bss_info_changed |= ieee80211_handle_bss_capability(sdata,
641 bss->cbss.capability, bss->has_erp_value, bss->erp_value); 611 bss->cbss.capability, bss->has_erp_value, bss->erp_value);
642 612
613 cfg80211_hold_bss(&bss->cbss);
614
643 ieee80211_rx_bss_put(local, bss); 615 ieee80211_rx_bss_put(local, bss);
644 } 616 }
645 617
@@ -682,6 +654,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
682static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata) 654static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata)
683{ 655{
684 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 656 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
657 struct ieee80211_local *local = sdata->local;
685 658
686 ifmgd->direct_probe_tries++; 659 ifmgd->direct_probe_tries++;
687 if (ifmgd->direct_probe_tries > IEEE80211_AUTH_MAX_TRIES) { 660 if (ifmgd->direct_probe_tries > IEEE80211_AUTH_MAX_TRIES) {
@@ -697,6 +670,13 @@ static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata)
697 ieee80211_rx_bss_remove(sdata, ifmgd->bssid, 670 ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
698 sdata->local->hw.conf.channel->center_freq, 671 sdata->local->hw.conf.channel->center_freq,
699 ifmgd->ssid, ifmgd->ssid_len); 672 ifmgd->ssid, ifmgd->ssid_len);
673
674 /*
675 * We might have a pending scan which had no chance to run yet
676 * due to state == IEEE80211_STA_MLME_DIRECT_PROBE.
677 * Hence, queue the STAs work again
678 */
679 queue_work(local->hw.workqueue, &ifmgd->work);
700 return; 680 return;
701 } 681 }
702 682
@@ -721,6 +701,9 @@ static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata)
721static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata) 701static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata)
722{ 702{
723 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 703 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
704 struct ieee80211_local *local = sdata->local;
705 u8 *ies;
706 size_t ies_len;
724 707
725 ifmgd->auth_tries++; 708 ifmgd->auth_tries++;
726 if (ifmgd->auth_tries > IEEE80211_AUTH_MAX_TRIES) { 709 if (ifmgd->auth_tries > IEEE80211_AUTH_MAX_TRIES) {
@@ -732,6 +715,13 @@ static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata)
732 ieee80211_rx_bss_remove(sdata, ifmgd->bssid, 715 ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
733 sdata->local->hw.conf.channel->center_freq, 716 sdata->local->hw.conf.channel->center_freq,
734 ifmgd->ssid, ifmgd->ssid_len); 717 ifmgd->ssid, ifmgd->ssid_len);
718
719 /*
720 * We might have a pending scan which had no chance to run yet
721 * due to state == IEEE80211_STA_MLME_AUTHENTICATE.
722 * Hence, queue the STAs work again
723 */
724 queue_work(local->hw.workqueue, &ifmgd->work);
735 return; 725 return;
736 } 726 }
737 727
@@ -739,7 +729,14 @@ static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata)
739 printk(KERN_DEBUG "%s: authenticate with AP %pM\n", 729 printk(KERN_DEBUG "%s: authenticate with AP %pM\n",
740 sdata->dev->name, ifmgd->bssid); 730 sdata->dev->name, ifmgd->bssid);
741 731
742 ieee80211_send_auth(sdata, 1, ifmgd->auth_alg, NULL, 0, 732 if (ifmgd->flags & IEEE80211_STA_EXT_SME) {
733 ies = ifmgd->sme_auth_ie;
734 ies_len = ifmgd->sme_auth_ie_len;
735 } else {
736 ies = NULL;
737 ies_len = 0;
738 }
739 ieee80211_send_auth(sdata, 1, ifmgd->auth_alg, ies, ies_len,
743 ifmgd->bssid, 0); 740 ifmgd->bssid, 0);
744 ifmgd->auth_transaction = 2; 741 ifmgd->auth_transaction = 2;
745 742
@@ -756,6 +753,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
756{ 753{
757 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 754 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
758 struct ieee80211_local *local = sdata->local; 755 struct ieee80211_local *local = sdata->local;
756 struct ieee80211_conf *conf = &local_to_hw(local)->conf;
757 struct ieee80211_bss *bss;
759 struct sta_info *sta; 758 struct sta_info *sta;
760 u32 changed = 0, config_changed = 0; 759 u32 changed = 0, config_changed = 0;
761 760
@@ -779,6 +778,15 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
779 778
780 ieee80211_sta_tear_down_BA_sessions(sta); 779 ieee80211_sta_tear_down_BA_sessions(sta);
781 780
781 bss = ieee80211_rx_bss_get(local, ifmgd->bssid,
782 conf->channel->center_freq,
783 ifmgd->ssid, ifmgd->ssid_len);
784
785 if (bss) {
786 cfg80211_unhold_bss(&bss->cbss);
787 ieee80211_rx_bss_put(local, bss);
788 }
789
782 if (self_disconnected) { 790 if (self_disconnected) {
783 if (deauth) 791 if (deauth)
784 ieee80211_send_deauth_disassoc(sdata, 792 ieee80211_send_deauth_disassoc(sdata,
@@ -854,7 +862,7 @@ static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata)
854 int wep_privacy; 862 int wep_privacy;
855 int privacy_invoked; 863 int privacy_invoked;
856 864
857 if (!ifmgd || (ifmgd->flags & IEEE80211_STA_MIXED_CELL)) 865 if (!ifmgd || (ifmgd->flags & IEEE80211_STA_EXT_SME))
858 return 0; 866 return 0;
859 867
860 bss = ieee80211_rx_bss_get(local, ifmgd->bssid, 868 bss = ieee80211_rx_bss_get(local, ifmgd->bssid,
@@ -878,6 +886,7 @@ static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata)
878static void ieee80211_associate(struct ieee80211_sub_if_data *sdata) 886static void ieee80211_associate(struct ieee80211_sub_if_data *sdata)
879{ 887{
880 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 888 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
889 struct ieee80211_local *local = sdata->local;
881 890
882 ifmgd->assoc_tries++; 891 ifmgd->assoc_tries++;
883 if (ifmgd->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) { 892 if (ifmgd->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) {
@@ -889,6 +898,12 @@ static void ieee80211_associate(struct ieee80211_sub_if_data *sdata)
889 ieee80211_rx_bss_remove(sdata, ifmgd->bssid, 898 ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
890 sdata->local->hw.conf.channel->center_freq, 899 sdata->local->hw.conf.channel->center_freq,
891 ifmgd->ssid, ifmgd->ssid_len); 900 ifmgd->ssid, ifmgd->ssid_len);
901 /*
902 * We might have a pending scan which had no chance to run yet
903 * due to state == IEEE80211_STA_MLME_ASSOCIATE.
904 * Hence, queue the STAs work again
905 */
906 queue_work(local->hw.workqueue, &ifmgd->work);
892 return; 907 return;
893 } 908 }
894 909
@@ -907,13 +922,55 @@ static void ieee80211_associate(struct ieee80211_sub_if_data *sdata)
907 mod_timer(&ifmgd->timer, jiffies + IEEE80211_ASSOC_TIMEOUT); 922 mod_timer(&ifmgd->timer, jiffies + IEEE80211_ASSOC_TIMEOUT);
908} 923}
909 924
925void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
926 struct ieee80211_hdr *hdr)
927{
928 /*
929 * We can postpone the mgd.timer whenever receiving unicast frames
930 * from AP because we know that the connection is working both ways
931 * at that time. But multicast frames (and hence also beacons) must
932 * be ignored here, because we need to trigger the timer during
933 * data idle periods for sending the periodical probe request to
934 * the AP.
935 */
936 if (!is_multicast_ether_addr(hdr->addr1))
937 mod_timer(&sdata->u.mgd.timer,
938 jiffies + IEEE80211_MONITORING_INTERVAL);
939}
940
941void ieee80211_beacon_loss_work(struct work_struct *work)
942{
943 struct ieee80211_sub_if_data *sdata =
944 container_of(work, struct ieee80211_sub_if_data,
945 u.mgd.beacon_loss_work);
946 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
947
948 printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM "
949 "- sending probe request\n", sdata->dev->name,
950 sdata->u.mgd.bssid);
951
952 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
953 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
954 ifmgd->ssid_len, NULL, 0);
955
956 mod_timer(&ifmgd->timer, jiffies + IEEE80211_MONITORING_INTERVAL);
957}
958
959void ieee80211_beacon_loss(struct ieee80211_vif *vif)
960{
961 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
962
963 queue_work(sdata->local->hw.workqueue,
964 &sdata->u.mgd.beacon_loss_work);
965}
966EXPORT_SYMBOL(ieee80211_beacon_loss);
910 967
911static void ieee80211_associated(struct ieee80211_sub_if_data *sdata) 968static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
912{ 969{
913 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 970 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
914 struct ieee80211_local *local = sdata->local; 971 struct ieee80211_local *local = sdata->local;
915 struct sta_info *sta; 972 struct sta_info *sta;
916 int disassoc; 973 bool disassoc = false;
917 974
918 /* TODO: start monitoring current AP signal quality and number of 975 /* TODO: start monitoring current AP signal quality and number of
919 * missed beacons. Scan other channels every now and then and search 976 * missed beacons. Scan other channels every now and then and search
@@ -928,36 +985,45 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
928 if (!sta) { 985 if (!sta) {
929 printk(KERN_DEBUG "%s: No STA entry for own AP %pM\n", 986 printk(KERN_DEBUG "%s: No STA entry for own AP %pM\n",
930 sdata->dev->name, ifmgd->bssid); 987 sdata->dev->name, ifmgd->bssid);
931 disassoc = 1; 988 disassoc = true;
932 } else { 989 goto unlock;
933 disassoc = 0;
934 if (time_after(jiffies,
935 sta->last_rx + IEEE80211_MONITORING_INTERVAL)) {
936 if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) {
937 printk(KERN_DEBUG "%s: No ProbeResp from "
938 "current AP %pM - assume out of "
939 "range\n",
940 sdata->dev->name, ifmgd->bssid);
941 disassoc = 1;
942 } else
943 ieee80211_send_probe_req(sdata, ifmgd->bssid,
944 ifmgd->ssid,
945 ifmgd->ssid_len,
946 NULL, 0);
947 ifmgd->flags ^= IEEE80211_STA_PROBEREQ_POLL;
948 } else {
949 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
950 if (time_after(jiffies, ifmgd->last_probe +
951 IEEE80211_PROBE_INTERVAL)) {
952 ifmgd->last_probe = jiffies;
953 ieee80211_send_probe_req(sdata, ifmgd->bssid,
954 ifmgd->ssid,
955 ifmgd->ssid_len,
956 NULL, 0);
957 }
958 }
959 } 990 }
960 991
992 if ((ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) &&
993 time_after(jiffies, sta->last_rx + IEEE80211_MONITORING_INTERVAL)) {
994 printk(KERN_DEBUG "%s: no probe response from AP %pM "
995 "- disassociating\n",
996 sdata->dev->name, ifmgd->bssid);
997 disassoc = true;
998 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
999 goto unlock;
1000 }
1001
1002 /*
1003 * Beacon filtering is only enabled with power save and then the
1004 * stack should not check for beacon loss.
1005 */
1006 if (!((local->hw.flags & IEEE80211_HW_BEACON_FILTER) &&
1007 (local->hw.conf.flags & IEEE80211_CONF_PS)) &&
1008 time_after(jiffies,
1009 ifmgd->last_beacon + IEEE80211_MONITORING_INTERVAL)) {
1010 printk(KERN_DEBUG "%s: beacon loss from AP %pM "
1011 "- sending probe request\n",
1012 sdata->dev->name, ifmgd->bssid);
1013 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
1014 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
1015 ifmgd->ssid_len, NULL, 0);
1016 goto unlock;
1017
1018 }
1019
1020 if (time_after(jiffies, sta->last_rx + IEEE80211_PROBE_IDLE_TIME)) {
1021 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
1022 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
1023 ifmgd->ssid_len, NULL, 0);
1024 }
1025
1026 unlock:
961 rcu_read_unlock(); 1027 rcu_read_unlock();
962 1028
963 if (disassoc) 1029 if (disassoc)
@@ -975,7 +1041,11 @@ static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata)
975 1041
976 printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name); 1042 printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name);
977 ifmgd->flags |= IEEE80211_STA_AUTHENTICATED; 1043 ifmgd->flags |= IEEE80211_STA_AUTHENTICATED;
978 ieee80211_associate(sdata); 1044 if (ifmgd->flags & IEEE80211_STA_EXT_SME) {
1045 /* Wait for SME to request association */
1046 ifmgd->state = IEEE80211_STA_MLME_DISABLED;
1047 } else
1048 ieee80211_associate(sdata);
979} 1049}
980 1050
981 1051
@@ -1061,12 +1131,15 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1061 switch (ifmgd->auth_alg) { 1131 switch (ifmgd->auth_alg) {
1062 case WLAN_AUTH_OPEN: 1132 case WLAN_AUTH_OPEN:
1063 case WLAN_AUTH_LEAP: 1133 case WLAN_AUTH_LEAP:
1134 case WLAN_AUTH_FT:
1064 ieee80211_auth_completed(sdata); 1135 ieee80211_auth_completed(sdata);
1136 cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, len);
1065 break; 1137 break;
1066 case WLAN_AUTH_SHARED_KEY: 1138 case WLAN_AUTH_SHARED_KEY:
1067 if (ifmgd->auth_transaction == 4) 1139 if (ifmgd->auth_transaction == 4) {
1068 ieee80211_auth_completed(sdata); 1140 ieee80211_auth_completed(sdata);
1069 else 1141 cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, len);
1142 } else
1070 ieee80211_auth_challenge(sdata, mgmt, len); 1143 ieee80211_auth_challenge(sdata, mgmt, len);
1071 break; 1144 break;
1072 } 1145 }
@@ -1092,9 +1165,10 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1092 printk(KERN_DEBUG "%s: deauthenticated (Reason: %u)\n", 1165 printk(KERN_DEBUG "%s: deauthenticated (Reason: %u)\n",
1093 sdata->dev->name, reason_code); 1166 sdata->dev->name, reason_code);
1094 1167
1095 if (ifmgd->state == IEEE80211_STA_MLME_AUTHENTICATE || 1168 if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) &&
1096 ifmgd->state == IEEE80211_STA_MLME_ASSOCIATE || 1169 (ifmgd->state == IEEE80211_STA_MLME_AUTHENTICATE ||
1097 ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) { 1170 ifmgd->state == IEEE80211_STA_MLME_ASSOCIATE ||
1171 ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED)) {
1098 ifmgd->state = IEEE80211_STA_MLME_DIRECT_PROBE; 1172 ifmgd->state = IEEE80211_STA_MLME_DIRECT_PROBE;
1099 mod_timer(&ifmgd->timer, jiffies + 1173 mod_timer(&ifmgd->timer, jiffies +
1100 IEEE80211_RETRY_AUTH_INTERVAL); 1174 IEEE80211_RETRY_AUTH_INTERVAL);
@@ -1102,6 +1176,7 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1102 1176
1103 ieee80211_set_disassoc(sdata, true, false, 0); 1177 ieee80211_set_disassoc(sdata, true, false, 0);
1104 ifmgd->flags &= ~IEEE80211_STA_AUTHENTICATED; 1178 ifmgd->flags &= ~IEEE80211_STA_AUTHENTICATED;
1179 cfg80211_send_rx_deauth(sdata->dev, (u8 *) mgmt, len);
1105} 1180}
1106 1181
1107 1182
@@ -1124,13 +1199,15 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1124 printk(KERN_DEBUG "%s: disassociated (Reason: %u)\n", 1199 printk(KERN_DEBUG "%s: disassociated (Reason: %u)\n",
1125 sdata->dev->name, reason_code); 1200 sdata->dev->name, reason_code);
1126 1201
1127 if (ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) { 1202 if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) &&
1203 ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) {
1128 ifmgd->state = IEEE80211_STA_MLME_ASSOCIATE; 1204 ifmgd->state = IEEE80211_STA_MLME_ASSOCIATE;
1129 mod_timer(&ifmgd->timer, jiffies + 1205 mod_timer(&ifmgd->timer, jiffies +
1130 IEEE80211_RETRY_AUTH_INTERVAL); 1206 IEEE80211_RETRY_AUTH_INTERVAL);
1131 } 1207 }
1132 1208
1133 ieee80211_set_disassoc(sdata, false, false, reason_code); 1209 ieee80211_set_disassoc(sdata, false, false, reason_code);
1210 cfg80211_send_rx_disassoc(sdata->dev, (u8 *) mgmt, len);
1134} 1211}
1135 1212
1136 1213
@@ -1346,7 +1423,14 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1346 bss_conf->assoc_capability = capab_info; 1423 bss_conf->assoc_capability = capab_info;
1347 ieee80211_set_associated(sdata, changed); 1424 ieee80211_set_associated(sdata, changed);
1348 1425
1426 /*
1427 * initialise the time of last beacon to be the association time,
1428 * otherwise beacon loss check will trigger immediately
1429 */
1430 ifmgd->last_beacon = jiffies;
1431
1349 ieee80211_associated(sdata); 1432 ieee80211_associated(sdata);
1433 cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, len);
1350} 1434}
1351 1435
1352 1436
@@ -1393,9 +1477,12 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1393 size_t len, 1477 size_t len,
1394 struct ieee80211_rx_status *rx_status) 1478 struct ieee80211_rx_status *rx_status)
1395{ 1479{
1480 struct ieee80211_if_managed *ifmgd;
1396 size_t baselen; 1481 size_t baselen;
1397 struct ieee802_11_elems elems; 1482 struct ieee802_11_elems elems;
1398 1483
1484 ifmgd = &sdata->u.mgd;
1485
1399 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) 1486 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
1400 return; /* ignore ProbeResp to foreign address */ 1487 return; /* ignore ProbeResp to foreign address */
1401 1488
@@ -1410,11 +1497,14 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1410 1497
1411 /* direct probe may be part of the association flow */ 1498 /* direct probe may be part of the association flow */
1412 if (test_and_clear_bit(IEEE80211_STA_REQ_DIRECT_PROBE, 1499 if (test_and_clear_bit(IEEE80211_STA_REQ_DIRECT_PROBE,
1413 &sdata->u.mgd.request)) { 1500 &ifmgd->request)) {
1414 printk(KERN_DEBUG "%s direct probe responded\n", 1501 printk(KERN_DEBUG "%s direct probe responded\n",
1415 sdata->dev->name); 1502 sdata->dev->name);
1416 ieee80211_authenticate(sdata); 1503 ieee80211_authenticate(sdata);
1417 } 1504 }
1505
1506 if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL)
1507 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
1418} 1508}
1419 1509
1420static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, 1510static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
@@ -1636,6 +1726,8 @@ static void ieee80211_sta_reset_auth(struct ieee80211_sub_if_data *sdata)
1636 ifmgd->auth_alg = WLAN_AUTH_SHARED_KEY; 1726 ifmgd->auth_alg = WLAN_AUTH_SHARED_KEY;
1637 else if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_LEAP) 1727 else if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_LEAP)
1638 ifmgd->auth_alg = WLAN_AUTH_LEAP; 1728 ifmgd->auth_alg = WLAN_AUTH_LEAP;
1729 else if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_FT)
1730 ifmgd->auth_alg = WLAN_AUTH_FT;
1639 else 1731 else
1640 ifmgd->auth_alg = WLAN_AUTH_OPEN; 1732 ifmgd->auth_alg = WLAN_AUTH_OPEN;
1641 ifmgd->auth_transaction = -1; 1733 ifmgd->auth_transaction = -1;
@@ -1659,7 +1751,8 @@ static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata)
1659 u16 capa_val = WLAN_CAPABILITY_ESS; 1751 u16 capa_val = WLAN_CAPABILITY_ESS;
1660 struct ieee80211_channel *chan = local->oper_channel; 1752 struct ieee80211_channel *chan = local->oper_channel;
1661 1753
1662 if (ifmgd->flags & (IEEE80211_STA_AUTO_SSID_SEL | 1754 if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) &&
1755 ifmgd->flags & (IEEE80211_STA_AUTO_SSID_SEL |
1663 IEEE80211_STA_AUTO_BSSID_SEL | 1756 IEEE80211_STA_AUTO_BSSID_SEL |
1664 IEEE80211_STA_AUTO_CHANNEL_SEL)) { 1757 IEEE80211_STA_AUTO_CHANNEL_SEL)) {
1665 capa_mask |= WLAN_CAPABILITY_PRIVACY; 1758 capa_mask |= WLAN_CAPABILITY_PRIVACY;
@@ -1822,6 +1915,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
1822 ifmgd = &sdata->u.mgd; 1915 ifmgd = &sdata->u.mgd;
1823 INIT_WORK(&ifmgd->work, ieee80211_sta_work); 1916 INIT_WORK(&ifmgd->work, ieee80211_sta_work);
1824 INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); 1917 INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
1918 INIT_WORK(&ifmgd->beacon_loss_work, ieee80211_beacon_loss_work);
1825 setup_timer(&ifmgd->timer, ieee80211_sta_timer, 1919 setup_timer(&ifmgd->timer, ieee80211_sta_timer,
1826 (unsigned long) sdata); 1920 (unsigned long) sdata);
1827 setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, 1921 setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer,
@@ -1834,7 +1928,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
1834 ifmgd->flags |= IEEE80211_STA_CREATE_IBSS | 1928 ifmgd->flags |= IEEE80211_STA_CREATE_IBSS |
1835 IEEE80211_STA_AUTO_BSSID_SEL | 1929 IEEE80211_STA_AUTO_BSSID_SEL |
1836 IEEE80211_STA_AUTO_CHANNEL_SEL; 1930 IEEE80211_STA_AUTO_CHANNEL_SEL;
1837 if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4) 1931 if (sdata->local->hw.queues >= 4)
1838 ifmgd->flags |= IEEE80211_STA_WMM_ENABLED; 1932 ifmgd->flags |= IEEE80211_STA_WMM_ENABLED;
1839} 1933}
1840 1934
@@ -1856,7 +1950,11 @@ void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata)
1856 ieee80211_set_disassoc(sdata, true, true, 1950 ieee80211_set_disassoc(sdata, true, true,
1857 WLAN_REASON_DEAUTH_LEAVING); 1951 WLAN_REASON_DEAUTH_LEAVING);
1858 1952
1859 set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request); 1953 if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) ||
1954 ifmgd->state != IEEE80211_STA_MLME_ASSOCIATE)
1955 set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request);
1956 else if (ifmgd->flags & IEEE80211_STA_EXT_SME)
1957 set_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request);
1860 queue_work(local->hw.workqueue, &ifmgd->work); 1958 queue_work(local->hw.workqueue, &ifmgd->work);
1861 } 1959 }
1862} 1960}
@@ -1865,8 +1963,6 @@ int ieee80211_sta_commit(struct ieee80211_sub_if_data *sdata)
1865{ 1963{
1866 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1964 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1867 1965
1868 ifmgd->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
1869
1870 if (ifmgd->ssid_len) 1966 if (ifmgd->ssid_len)
1871 ifmgd->flags |= IEEE80211_STA_SSID_SET; 1967 ifmgd->flags |= IEEE80211_STA_SSID_SET;
1872 else 1968 else
@@ -1885,6 +1981,10 @@ int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size
1885 ifmgd = &sdata->u.mgd; 1981 ifmgd = &sdata->u.mgd;
1886 1982
1887 if (ifmgd->ssid_len != len || memcmp(ifmgd->ssid, ssid, len) != 0) { 1983 if (ifmgd->ssid_len != len || memcmp(ifmgd->ssid, ssid, len) != 0) {
1984 /*
1985 * Do not use reassociation if SSID is changed (different ESS).
1986 */
1987 ifmgd->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
1888 memset(ifmgd->ssid, 0, sizeof(ifmgd->ssid)); 1988 memset(ifmgd->ssid, 0, sizeof(ifmgd->ssid));
1889 memcpy(ifmgd->ssid, ssid, len); 1989 memcpy(ifmgd->ssid, ssid, len);
1890 ifmgd->ssid_len = len; 1990 ifmgd->ssid_len = len;
@@ -1923,7 +2023,8 @@ int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid)
1923 return ieee80211_sta_commit(sdata); 2023 return ieee80211_sta_commit(sdata);
1924} 2024}
1925 2025
1926int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, char *ie, size_t len) 2026int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
2027 const char *ie, size_t len)
1927{ 2028{
1928 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2029 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1929 2030
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 44525f517077..027302326498 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -10,6 +10,10 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
10 struct ieee80211_sub_if_data *sdata; 10 struct ieee80211_sub_if_data *sdata;
11 struct ieee80211_if_init_conf conf; 11 struct ieee80211_if_init_conf conf;
12 struct sta_info *sta; 12 struct sta_info *sta;
13 unsigned long flags;
14
15 ieee80211_stop_queues_by_reason(hw,
16 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
13 17
14 flush_workqueue(local->hw.workqueue); 18 flush_workqueue(local->hw.workqueue);
15 19
@@ -17,10 +21,23 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
17 list_for_each_entry(sdata, &local->interfaces, list) 21 list_for_each_entry(sdata, &local->interfaces, list)
18 ieee80211_disable_keys(sdata); 22 ieee80211_disable_keys(sdata);
19 23
20 /* remove STAs */ 24 /* Tear down aggregation sessions */
21 list_for_each_entry(sta, &local->sta_list, list) { 25
26 rcu_read_lock();
27
28 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
29 list_for_each_entry_rcu(sta, &local->sta_list, list) {
30 set_sta_flags(sta, WLAN_STA_SUSPEND);
31 ieee80211_sta_tear_down_BA_sessions(sta);
32 }
33 }
22 34
23 if (local->ops->sta_notify) { 35 rcu_read_unlock();
36
37 /* remove STAs */
38 if (local->ops->sta_notify) {
39 spin_lock_irqsave(&local->sta_lock, flags);
40 list_for_each_entry(sta, &local->sta_list, list) {
24 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 41 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
25 sdata = container_of(sdata->bss, 42 sdata = container_of(sdata->bss,
26 struct ieee80211_sub_if_data, 43 struct ieee80211_sub_if_data,
@@ -29,11 +46,11 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
29 local->ops->sta_notify(hw, &sdata->vif, 46 local->ops->sta_notify(hw, &sdata->vif,
30 STA_NOTIFY_REMOVE, &sta->sta); 47 STA_NOTIFY_REMOVE, &sta->sta);
31 } 48 }
49 spin_unlock_irqrestore(&local->sta_lock, flags);
32 } 50 }
33 51
34 /* remove all interfaces */ 52 /* remove all interfaces */
35 list_for_each_entry(sdata, &local->interfaces, list) { 53 list_for_each_entry(sdata, &local->interfaces, list) {
36
37 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 54 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
38 sdata->vif.type != NL80211_IFTYPE_MONITOR && 55 sdata->vif.type != NL80211_IFTYPE_MONITOR &&
39 netif_running(sdata->dev)) { 56 netif_running(sdata->dev)) {
@@ -61,6 +78,7 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
61 struct ieee80211_sub_if_data *sdata; 78 struct ieee80211_sub_if_data *sdata;
62 struct ieee80211_if_init_conf conf; 79 struct ieee80211_if_init_conf conf;
63 struct sta_info *sta; 80 struct sta_info *sta;
81 unsigned long flags;
64 int res; 82 int res;
65 83
66 /* restart hardware */ 84 /* restart hardware */
@@ -72,7 +90,6 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
72 90
73 /* add interfaces */ 91 /* add interfaces */
74 list_for_each_entry(sdata, &local->interfaces, list) { 92 list_for_each_entry(sdata, &local->interfaces, list) {
75
76 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 93 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
77 sdata->vif.type != NL80211_IFTYPE_MONITOR && 94 sdata->vif.type != NL80211_IFTYPE_MONITOR &&
78 netif_running(sdata->dev)) { 95 netif_running(sdata->dev)) {
@@ -84,9 +101,9 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
84 } 101 }
85 102
86 /* add STAs back */ 103 /* add STAs back */
87 list_for_each_entry(sta, &local->sta_list, list) { 104 if (local->ops->sta_notify) {
88 105 spin_lock_irqsave(&local->sta_lock, flags);
89 if (local->ops->sta_notify) { 106 list_for_each_entry(sta, &local->sta_list, list) {
90 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 107 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
91 sdata = container_of(sdata->bss, 108 sdata = container_of(sdata->bss,
92 struct ieee80211_sub_if_data, 109 struct ieee80211_sub_if_data,
@@ -95,8 +112,21 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
95 local->ops->sta_notify(hw, &sdata->vif, 112 local->ops->sta_notify(hw, &sdata->vif,
96 STA_NOTIFY_ADD, &sta->sta); 113 STA_NOTIFY_ADD, &sta->sta);
97 } 114 }
115 spin_unlock_irqrestore(&local->sta_lock, flags);
98 } 116 }
99 117
118 /* Clear Suspend state so that ADDBA requests can be processed */
119
120 rcu_read_lock();
121
122 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
123 list_for_each_entry_rcu(sta, &local->sta_list, list) {
124 clear_sta_flags(sta, WLAN_STA_SUSPEND);
125 }
126 }
127
128 rcu_read_unlock();
129
100 /* add back keys */ 130 /* add back keys */
101 list_for_each_entry(sdata, &local->interfaces, list) 131 list_for_each_entry(sdata, &local->interfaces, list)
102 if (netif_running(sdata->dev)) 132 if (netif_running(sdata->dev))
@@ -113,5 +143,37 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
113 ieee80211_configure_filter(local); 143 ieee80211_configure_filter(local);
114 netif_addr_unlock_bh(local->mdev); 144 netif_addr_unlock_bh(local->mdev);
115 145
146 /* Finally also reconfigure all the BSS information */
147 list_for_each_entry(sdata, &local->interfaces, list) {
148 u32 changed = ~0;
149 if (!netif_running(sdata->dev))
150 continue;
151 switch (sdata->vif.type) {
152 case NL80211_IFTYPE_STATION:
153 /* disable beacon change bits */
154 changed &= ~IEEE80211_IFCC_BEACON;
155 /* fall through */
156 case NL80211_IFTYPE_ADHOC:
157 case NL80211_IFTYPE_AP:
158 case NL80211_IFTYPE_MESH_POINT:
159 WARN_ON(ieee80211_if_config(sdata, changed));
160 ieee80211_bss_info_change_notify(sdata, ~0);
161 break;
162 case NL80211_IFTYPE_WDS:
163 break;
164 case NL80211_IFTYPE_AP_VLAN:
165 case NL80211_IFTYPE_MONITOR:
166 /* ignore virtual */
167 break;
168 case NL80211_IFTYPE_UNSPECIFIED:
169 case __NL80211_IFTYPE_AFTER_LAST:
170 WARN_ON(1);
171 break;
172 }
173 }
174
175 ieee80211_wake_queues_by_reason(hw,
176 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
177
116 return 0; 178 return 0;
117} 179}
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 3fa7ab285066..4641f00a1e5c 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -219,10 +219,12 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
219 info->control.rates[i].count = 1; 219 info->control.rates[i].count = 1;
220 } 220 }
221 221
222 if (sta && sdata->force_unicast_rateidx > -1) 222 if (sta && sdata->force_unicast_rateidx > -1) {
223 info->control.rates[0].idx = sdata->force_unicast_rateidx; 223 info->control.rates[0].idx = sdata->force_unicast_rateidx;
224 else 224 } else {
225 ref->ops->get_rate(ref->priv, ista, priv_sta, txrc); 225 ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
226 info->flags |= IEEE80211_TX_INTFL_RCALGO;
227 }
226 228
227 /* 229 /*
228 * try to enforce the maximum rate the user wanted 230 * try to enforce the maximum rate the user wanted
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index b9164c9a9563..2ab5ad9e71ce 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -44,8 +44,10 @@ static inline void rate_control_tx_status(struct ieee80211_local *local,
44 struct rate_control_ref *ref = local->rate_ctrl; 44 struct rate_control_ref *ref = local->rate_ctrl;
45 struct ieee80211_sta *ista = &sta->sta; 45 struct ieee80211_sta *ista = &sta->sta;
46 void *priv_sta = sta->rate_ctrl_priv; 46 void *priv_sta = sta->rate_ctrl_priv;
47 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
47 48
48 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb); 49 if (likely(info->flags & IEEE80211_TX_INTFL_RCALGO))
50 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
49} 51}
50 52
51 53
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 66f7ecf51b92..64ebe664effc 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -142,6 +142,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
142 /* IEEE80211_RADIOTAP_FLAGS */ 142 /* IEEE80211_RADIOTAP_FLAGS */
143 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 143 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
144 *pos |= IEEE80211_RADIOTAP_F_FCS; 144 *pos |= IEEE80211_RADIOTAP_F_FCS;
145 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
146 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
145 if (status->flag & RX_FLAG_SHORTPRE) 147 if (status->flag & RX_FLAG_SHORTPRE)
146 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; 148 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
147 pos++; 149 pos++;
@@ -204,9 +206,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
204 /* ensure 2 byte alignment for the 2 byte field as required */ 206 /* ensure 2 byte alignment for the 2 byte field as required */
205 if ((pos - (unsigned char *)rthdr) & 1) 207 if ((pos - (unsigned char *)rthdr) & 1)
206 pos++; 208 pos++;
207 /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */ 209 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
208 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 210 *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADPLCP);
209 *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS);
210 pos += 2; 211 pos += 2;
211} 212}
212 213
@@ -849,12 +850,19 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
849 * Mesh beacons will update last_rx when if they are found to 850 * Mesh beacons will update last_rx when if they are found to
850 * match the current local configuration when processed. 851 * match the current local configuration when processed.
851 */ 852 */
852 sta->last_rx = jiffies; 853 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
854 ieee80211_is_beacon(hdr->frame_control)) {
855 rx->sdata->u.mgd.last_beacon = jiffies;
856 } else
857 sta->last_rx = jiffies;
853 } 858 }
854 859
855 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 860 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
856 return RX_CONTINUE; 861 return RX_CONTINUE;
857 862
863 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
864 ieee80211_sta_rx_notify(rx->sdata, hdr);
865
858 sta->rx_fragments++; 866 sta->rx_fragments++;
859 sta->rx_bytes += rx->skb->len; 867 sta->rx_bytes += rx->skb->len;
860 sta->last_signal = rx->status->signal; 868 sta->last_signal = rx->status->signal;
@@ -1876,18 +1884,13 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1876 if (ieee80211_vif_is_mesh(&sdata->vif)) 1884 if (ieee80211_vif_is_mesh(&sdata->vif))
1877 return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status); 1885 return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status);
1878 1886
1879 if (sdata->vif.type != NL80211_IFTYPE_STATION && 1887 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
1880 sdata->vif.type != NL80211_IFTYPE_ADHOC) 1888 return ieee80211_ibss_rx_mgmt(sdata, rx->skb, rx->status);
1881 return RX_DROP_MONITOR;
1882
1883 1889
1884 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 1890 if (sdata->vif.type == NL80211_IFTYPE_STATION)
1885 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)
1886 return RX_DROP_MONITOR;
1887 return ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status); 1891 return ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status);
1888 }
1889 1892
1890 return ieee80211_ibss_rx_mgmt(sdata, rx->skb, rx->status); 1893 return RX_DROP_MONITOR;
1891} 1894}
1892 1895
1893static void ieee80211_rx_michael_mic_report(struct net_device *dev, 1896static void ieee80211_rx_michael_mic_report(struct net_device *dev,
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 5030a3c87509..3bf9839f5916 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -214,6 +214,66 @@ void ieee80211_scan_failed(struct ieee80211_local *local)
214 local->scan_req = NULL; 214 local->scan_req = NULL;
215} 215}
216 216
217/*
218 * inform AP that we will go to sleep so that it will buffer the frames
219 * while we scan
220 */
221static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
222{
223 struct ieee80211_local *local = sdata->local;
224 bool ps = false;
225
226 /* FIXME: what to do when local->pspolling is true? */
227
228 del_timer_sync(&local->dynamic_ps_timer);
229 cancel_work_sync(&local->dynamic_ps_enable_work);
230
231 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
232 ps = true;
233 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
234 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
235 }
236
237 if (!ps || !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
238 /*
239 * If power save was enabled, no need to send a nullfunc
240 * frame because AP knows that we are sleeping. But if the
241 * hardware is creating the nullfunc frame for power save
242 * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
243 * enabled) and power save was enabled, the firmware just
244 * sent a null frame with power save disabled. So we need
245 * to send a new nullfunc frame to inform the AP that we
246 * are again sleeping.
247 */
248 ieee80211_send_nullfunc(local, sdata, 1);
249}
250
251/* inform AP that we are awake again, unless power save is enabled */
252static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
253{
254 struct ieee80211_local *local = sdata->local;
255
256 if (!local->powersave)
257 ieee80211_send_nullfunc(local, sdata, 0);
258 else {
259 /*
260 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
261 * will send a nullfunc frame with the powersave bit set
262 * even though the AP already knows that we are sleeping.
263 * This could be avoided by sending a null frame with power
264 * save bit disabled before enabling the power save, but
265 * this doesn't gain anything.
266 *
267 * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
268 * to send a nullfunc frame because AP already knows that
269 * we are sleeping, let's just enable power save mode in
270 * hardware.
271 */
272 local->hw.conf.flags |= IEEE80211_CONF_PS;
273 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
274 }
275}
276
217void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) 277void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
218{ 278{
219 struct ieee80211_local *local = hw_to_local(hw); 279 struct ieee80211_local *local = hw_to_local(hw);
@@ -268,7 +328,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
268 /* Tell AP we're back */ 328 /* Tell AP we're back */
269 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 329 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
270 if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) { 330 if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) {
271 ieee80211_send_nullfunc(local, sdata, 0); 331 ieee80211_scan_ps_disable(sdata);
272 netif_tx_wake_all_queues(sdata->dev); 332 netif_tx_wake_all_queues(sdata->dev);
273 } 333 }
274 } else 334 } else
@@ -409,6 +469,19 @@ int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
409 return 0; 469 return 0;
410 } 470 }
411 471
472 /*
473 * Hardware/driver doesn't support hw_scan, so use software
474 * scanning instead. First send a nullfunc frame with power save
475 * bit on so that AP will buffer the frames for us while we are not
476 * listening, then send probe requests to each channel and wait for
477 * the responses. After all channels are scanned, tune back to the
478 * original channel and send a nullfunc frame with power save bit
479 * off to trigger the AP to send us all the buffered frames.
480 *
481 * Note that while local->sw_scanning is true everything else but
482 * nullfunc frames and probe requests will be dropped in
483 * ieee80211_tx_h_check_assoc().
484 */
412 local->sw_scanning = true; 485 local->sw_scanning = true;
413 if (local->ops->sw_scan_start) 486 if (local->ops->sw_scan_start)
414 local->ops->sw_scan_start(local_to_hw(local)); 487 local->ops->sw_scan_start(local_to_hw(local));
@@ -428,7 +501,7 @@ int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
428 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 501 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
429 if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) { 502 if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) {
430 netif_tx_stop_all_queues(sdata->dev); 503 netif_tx_stop_all_queues(sdata->dev);
431 ieee80211_send_nullfunc(local, sdata, 1); 504 ieee80211_scan_ps_enable(sdata);
432 } 505 }
433 } else 506 } else
434 netif_tx_stop_all_queues(sdata->dev); 507 netif_tx_stop_all_queues(sdata->dev);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 4ba3c540fcf3..c5f14e6bbde2 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -203,17 +203,6 @@ void sta_info_destroy(struct sta_info *sta)
203 if (tid_rx) 203 if (tid_rx)
204 tid_rx->shutdown = true; 204 tid_rx->shutdown = true;
205 205
206 /*
207 * The stop callback cannot find this station any more, but
208 * it didn't complete its work -- start the queue if necessary
209 */
210 if (sta->ampdu_mlme.tid_state_tx[i] & HT_AGG_STATE_INITIATOR_MSK &&
211 sta->ampdu_mlme.tid_state_tx[i] & HT_AGG_STATE_REQ_STOP_BA_MSK &&
212 local->hw.ampdu_queues)
213 ieee80211_wake_queue_by_reason(&local->hw,
214 local->hw.queues + sta->tid_to_tx_q[i],
215 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
216
217 spin_unlock_bh(&sta->lock); 206 spin_unlock_bh(&sta->lock);
218 207
219 /* 208 /*
@@ -239,6 +228,11 @@ void sta_info_destroy(struct sta_info *sta)
239 tid_tx = sta->ampdu_mlme.tid_tx[i]; 228 tid_tx = sta->ampdu_mlme.tid_tx[i];
240 if (tid_tx) { 229 if (tid_tx) {
241 del_timer_sync(&tid_tx->addba_resp_timer); 230 del_timer_sync(&tid_tx->addba_resp_timer);
231 /*
232 * STA removed while aggregation session being
233 * started? Bit odd, but purge frames anyway.
234 */
235 skb_queue_purge(&tid_tx->pending);
242 kfree(tid_tx); 236 kfree(tid_tx);
243 } 237 }
244 } 238 }
@@ -287,7 +281,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
287 * enable session_timer's data differentiation. refer to 281 * enable session_timer's data differentiation. refer to
288 * sta_rx_agg_session_timer_expired for useage */ 282 * sta_rx_agg_session_timer_expired for useage */
289 sta->timer_to_tid[i] = i; 283 sta->timer_to_tid[i] = i;
290 sta->tid_to_tx_q[i] = -1;
291 /* rx */ 284 /* rx */
292 sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE; 285 sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE;
293 sta->ampdu_mlme.tid_rx[i] = NULL; 286 sta->ampdu_mlme.tid_rx[i] = NULL;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 1f45573c580c..5534d489f506 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -35,6 +35,8 @@
35 * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next 35 * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next
36 * frame to this station is transmitted. 36 * frame to this station is transmitted.
37 * @WLAN_STA_MFP: Management frame protection is used with this STA. 37 * @WLAN_STA_MFP: Management frame protection is used with this STA.
38 * @WLAN_STA_SUSPEND: Set/cleared during a suspend/resume cycle.
39 * Used to deny ADDBA requests (both TX and RX).
38 */ 40 */
39enum ieee80211_sta_info_flags { 41enum ieee80211_sta_info_flags {
40 WLAN_STA_AUTH = 1<<0, 42 WLAN_STA_AUTH = 1<<0,
@@ -48,6 +50,7 @@ enum ieee80211_sta_info_flags {
48 WLAN_STA_PSPOLL = 1<<8, 50 WLAN_STA_PSPOLL = 1<<8,
49 WLAN_STA_CLEAR_PS_FILT = 1<<9, 51 WLAN_STA_CLEAR_PS_FILT = 1<<9,
50 WLAN_STA_MFP = 1<<10, 52 WLAN_STA_MFP = 1<<10,
53 WLAN_STA_SUSPEND = 1<<11
51}; 54};
52 55
53#define STA_TID_NUM 16 56#define STA_TID_NUM 16
@@ -70,11 +73,13 @@ enum ieee80211_sta_info_flags {
70 * struct tid_ampdu_tx - TID aggregation information (Tx). 73 * struct tid_ampdu_tx - TID aggregation information (Tx).
71 * 74 *
72 * @addba_resp_timer: timer for peer's response to addba request 75 * @addba_resp_timer: timer for peer's response to addba request
76 * @pending: pending frames queue -- use sta's spinlock to protect
73 * @ssn: Starting Sequence Number expected to be aggregated. 77 * @ssn: Starting Sequence Number expected to be aggregated.
74 * @dialog_token: dialog token for aggregation session 78 * @dialog_token: dialog token for aggregation session
75 */ 79 */
76struct tid_ampdu_tx { 80struct tid_ampdu_tx {
77 struct timer_list addba_resp_timer; 81 struct timer_list addba_resp_timer;
82 struct sk_buff_head pending;
78 u16 ssn; 83 u16 ssn;
79 u8 dialog_token; 84 u8 dialog_token;
80}; 85};
@@ -201,7 +206,6 @@ struct sta_ampdu_mlme {
201 * @tid_seq: per-TID sequence numbers for sending to this STA 206 * @tid_seq: per-TID sequence numbers for sending to this STA
202 * @ampdu_mlme: A-MPDU state machine state 207 * @ampdu_mlme: A-MPDU state machine state
203 * @timer_to_tid: identity mapping to ID timers 208 * @timer_to_tid: identity mapping to ID timers
204 * @tid_to_tx_q: map tid to tx queue (invalid == negative values)
205 * @llid: Local link ID 209 * @llid: Local link ID
206 * @plid: Peer link ID 210 * @plid: Peer link ID
207 * @reason: Cancel reason on PLINK_HOLDING state 211 * @reason: Cancel reason on PLINK_HOLDING state
@@ -276,7 +280,6 @@ struct sta_info {
276 */ 280 */
277 struct sta_ampdu_mlme ampdu_mlme; 281 struct sta_ampdu_mlme ampdu_mlme;
278 u8 timer_to_tid[STA_TID_NUM]; 282 u8 timer_to_tid[STA_TID_NUM];
279 s8 tid_to_tx_q[STA_TID_NUM];
280 283
281#ifdef CONFIG_MAC80211_MESH 284#ifdef CONFIG_MAC80211_MESH
282 /* 285 /*
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 457238a2f3fc..3fb04a86444d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -34,8 +34,7 @@
34 34
35#define IEEE80211_TX_OK 0 35#define IEEE80211_TX_OK 0
36#define IEEE80211_TX_AGAIN 1 36#define IEEE80211_TX_AGAIN 1
37#define IEEE80211_TX_FRAG_AGAIN 2 37#define IEEE80211_TX_PENDING 2
38#define IEEE80211_TX_PENDING 3
39 38
40/* misc utils */ 39/* misc utils */
41 40
@@ -193,7 +192,19 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
193 return TX_CONTINUE; 192 return TX_CONTINUE;
194 193
195 if (unlikely(tx->local->sw_scanning) && 194 if (unlikely(tx->local->sw_scanning) &&
196 !ieee80211_is_probe_req(hdr->frame_control)) 195 !ieee80211_is_probe_req(hdr->frame_control) &&
196 !ieee80211_is_nullfunc(hdr->frame_control))
197 /*
198 * When software scanning only nullfunc frames (to notify
199 * the sleep state to the AP) and probe requests (for the
200 * active scan) are allowed, all other frames should not be
201 * sent and we should not get here, but if we do
202 * nonetheless, drop them to avoid sending them
203 * off-channel. See the link below and
204 * ieee80211_start_scan() for more.
205 *
206 * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
207 */
197 return TX_DROP; 208 return TX_DROP;
198 209
199 if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 210 if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
@@ -690,17 +701,62 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
690 return TX_CONTINUE; 701 return TX_CONTINUE;
691} 702}
692 703
704static int ieee80211_fragment(struct ieee80211_local *local,
705 struct sk_buff *skb, int hdrlen,
706 int frag_threshold)
707{
708 struct sk_buff *tail = skb, *tmp;
709 int per_fragm = frag_threshold - hdrlen - FCS_LEN;
710 int pos = hdrlen + per_fragm;
711 int rem = skb->len - hdrlen - per_fragm;
712
713 if (WARN_ON(rem < 0))
714 return -EINVAL;
715
716 while (rem) {
717 int fraglen = per_fragm;
718
719 if (fraglen > rem)
720 fraglen = rem;
721 rem -= fraglen;
722 tmp = dev_alloc_skb(local->tx_headroom +
723 frag_threshold +
724 IEEE80211_ENCRYPT_HEADROOM +
725 IEEE80211_ENCRYPT_TAILROOM);
726 if (!tmp)
727 return -ENOMEM;
728 tail->next = tmp;
729 tail = tmp;
730 skb_reserve(tmp, local->tx_headroom +
731 IEEE80211_ENCRYPT_HEADROOM);
732 /* copy control information */
733 memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
734 skb_copy_queue_mapping(tmp, skb);
735 tmp->priority = skb->priority;
736 tmp->do_not_encrypt = skb->do_not_encrypt;
737 tmp->dev = skb->dev;
738 tmp->iif = skb->iif;
739
740 /* copy header and data */
741 memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen);
742 memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen);
743
744 pos += fraglen;
745 }
746
747 skb->len = hdrlen + per_fragm;
748 return 0;
749}
750
693static ieee80211_tx_result debug_noinline 751static ieee80211_tx_result debug_noinline
694ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) 752ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
695{ 753{
696 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 754 struct sk_buff *skb = tx->skb;
697 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 755 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
698 size_t hdrlen, per_fragm, num_fragm, payload_len, left; 756 struct ieee80211_hdr *hdr = (void *)skb->data;
699 struct sk_buff **frags, *first, *frag;
700 int i;
701 u16 seq;
702 u8 *pos;
703 int frag_threshold = tx->local->fragmentation_threshold; 757 int frag_threshold = tx->local->fragmentation_threshold;
758 int hdrlen;
759 int fragnum;
704 760
705 if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) 761 if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
706 return TX_CONTINUE; 762 return TX_CONTINUE;
@@ -713,58 +769,35 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
713 if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU)) 769 if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
714 return TX_DROP; 770 return TX_DROP;
715 771
716 first = tx->skb;
717
718 hdrlen = ieee80211_hdrlen(hdr->frame_control); 772 hdrlen = ieee80211_hdrlen(hdr->frame_control);
719 payload_len = first->len - hdrlen;
720 per_fragm = frag_threshold - hdrlen - FCS_LEN;
721 num_fragm = DIV_ROUND_UP(payload_len, per_fragm);
722
723 frags = kzalloc(num_fragm * sizeof(struct sk_buff *), GFP_ATOMIC);
724 if (!frags)
725 goto fail;
726
727 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
728 seq = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ;
729 pos = first->data + hdrlen + per_fragm;
730 left = payload_len - per_fragm;
731 for (i = 0; i < num_fragm - 1; i++) {
732 struct ieee80211_hdr *fhdr;
733 size_t copylen;
734
735 if (left <= 0)
736 goto fail;
737 773
738 /* reserve enough extra head and tail room for possible 774 /* internal error, why is TX_FRAGMENTED set? */
739 * encryption */ 775 if (WARN_ON(skb->len <= frag_threshold))
740 frag = frags[i] = 776 return TX_DROP;
741 dev_alloc_skb(tx->local->tx_headroom +
742 frag_threshold +
743 IEEE80211_ENCRYPT_HEADROOM +
744 IEEE80211_ENCRYPT_TAILROOM);
745 if (!frag)
746 goto fail;
747
748 /* Make sure that all fragments use the same priority so
749 * that they end up using the same TX queue */
750 frag->priority = first->priority;
751 777
752 skb_reserve(frag, tx->local->tx_headroom + 778 /*
753 IEEE80211_ENCRYPT_HEADROOM); 779 * Now fragment the frame. This will allocate all the fragments and
780 * chain them (using skb as the first fragment) to skb->next.
781 * During transmission, we will remove the successfully transmitted
782 * fragments from this list. When the low-level driver rejects one
783 * of the fragments then we will simply pretend to accept the skb
784 * but store it away as pending.
785 */
786 if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold))
787 return TX_DROP;
754 788
755 /* copy TX information */ 789 /* update duration/seq/flags of fragments */
756 info = IEEE80211_SKB_CB(frag); 790 fragnum = 0;
757 memcpy(info, first->cb, sizeof(frag->cb)); 791 do {
792 int next_len;
793 const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
758 794
759 /* copy/fill in 802.11 header */ 795 hdr = (void *)skb->data;
760 fhdr = (struct ieee80211_hdr *) skb_put(frag, hdrlen); 796 info = IEEE80211_SKB_CB(skb);
761 memcpy(fhdr, first->data, hdrlen);
762 fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG));
763 797
764 if (i == num_fragm - 2) { 798 if (skb->next) {
765 /* clear MOREFRAGS bit for the last fragment */ 799 hdr->frame_control |= morefrags;
766 fhdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREFRAGS); 800 next_len = skb->next->len;
767 } else {
768 /* 801 /*
769 * No multi-rate retries for fragmented frames, that 802 * No multi-rate retries for fragmented frames, that
770 * would completely throw off the NAV at other STAs. 803 * would completely throw off the NAV at other STAs.
@@ -775,37 +808,16 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
775 info->control.rates[4].idx = -1; 808 info->control.rates[4].idx = -1;
776 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5); 809 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5);
777 info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; 810 info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
811 } else {
812 hdr->frame_control &= ~morefrags;
813 next_len = 0;
778 } 814 }
779 815 hdr->duration_id = ieee80211_duration(tx, 0, next_len);
780 /* copy data */ 816 hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
781 copylen = left > per_fragm ? per_fragm : left; 817 fragnum++;
782 memcpy(skb_put(frag, copylen), pos, copylen); 818 } while ((skb = skb->next));
783
784 skb_copy_queue_mapping(frag, first);
785
786 frag->do_not_encrypt = first->do_not_encrypt;
787 frag->dev = first->dev;
788 frag->iif = first->iif;
789
790 pos += copylen;
791 left -= copylen;
792 }
793 skb_trim(first, hdrlen + per_fragm);
794
795 tx->num_extra_frag = num_fragm - 1;
796 tx->extra_frag = frags;
797 819
798 return TX_CONTINUE; 820 return TX_CONTINUE;
799
800 fail:
801 if (frags) {
802 for (i = 0; i < num_fragm - 1; i++)
803 if (frags[i])
804 dev_kfree_skb(frags[i]);
805 kfree(frags);
806 }
807 I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment);
808 return TX_DROP;
809} 821}
810 822
811static ieee80211_tx_result debug_noinline 823static ieee80211_tx_result debug_noinline
@@ -833,27 +845,19 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
833static ieee80211_tx_result debug_noinline 845static ieee80211_tx_result debug_noinline
834ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx) 846ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
835{ 847{
836 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 848 struct sk_buff *skb = tx->skb;
837 int next_len, i; 849 struct ieee80211_hdr *hdr;
838 int group_addr = is_multicast_ether_addr(hdr->addr1); 850 int next_len;
839 851 bool group_addr;
840 if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) {
841 hdr->duration_id = ieee80211_duration(tx, group_addr, 0);
842 return TX_CONTINUE;
843 }
844 852
845 hdr->duration_id = ieee80211_duration(tx, group_addr, 853 do {
846 tx->extra_frag[0]->len); 854 hdr = (void *) skb->data;
855 next_len = skb->next ? skb->next->len : 0;
856 group_addr = is_multicast_ether_addr(hdr->addr1);
847 857
848 for (i = 0; i < tx->num_extra_frag; i++) { 858 hdr->duration_id =
849 if (i + 1 < tx->num_extra_frag) 859 ieee80211_duration(tx, group_addr, next_len);
850 next_len = tx->extra_frag[i + 1]->len; 860 } while ((skb = skb->next));
851 else
852 next_len = 0;
853
854 hdr = (struct ieee80211_hdr *)tx->extra_frag[i]->data;
855 hdr->duration_id = ieee80211_duration(tx, 0, next_len);
856 }
857 861
858 return TX_CONTINUE; 862 return TX_CONTINUE;
859} 863}
@@ -861,19 +865,16 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
861static ieee80211_tx_result debug_noinline 865static ieee80211_tx_result debug_noinline
862ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) 866ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
863{ 867{
864 int i; 868 struct sk_buff *skb = tx->skb;
865 869
866 if (!tx->sta) 870 if (!tx->sta)
867 return TX_CONTINUE; 871 return TX_CONTINUE;
868 872
869 tx->sta->tx_packets++; 873 tx->sta->tx_packets++;
870 tx->sta->tx_fragments++; 874 do {
871 tx->sta->tx_bytes += tx->skb->len; 875 tx->sta->tx_fragments++;
872 if (tx->extra_frag) { 876 tx->sta->tx_bytes += skb->len;
873 tx->sta->tx_fragments += tx->num_extra_frag; 877 } while ((skb = skb->next));
874 for (i = 0; i < tx->num_extra_frag; i++)
875 tx->sta->tx_bytes += tx->extra_frag[i]->len;
876 }
877 878
878 return TX_CONTINUE; 879 return TX_CONTINUE;
879} 880}
@@ -983,9 +984,9 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
983 struct ieee80211_hdr *hdr; 984 struct ieee80211_hdr *hdr;
984 struct ieee80211_sub_if_data *sdata; 985 struct ieee80211_sub_if_data *sdata;
985 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 986 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
986
987 int hdrlen, tid; 987 int hdrlen, tid;
988 u8 *qc, *state; 988 u8 *qc, *state;
989 bool queued = false;
989 990
990 memset(tx, 0, sizeof(*tx)); 991 memset(tx, 0, sizeof(*tx));
991 tx->skb = skb; 992 tx->skb = skb;
@@ -1012,25 +1013,53 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1012 */ 1013 */
1013 } 1014 }
1014 1015
1016 /*
1017 * If this flag is set to true anywhere, and we get here,
1018 * we are doing the needed processing, so remove the flag
1019 * now.
1020 */
1021 info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1022
1015 hdr = (struct ieee80211_hdr *) skb->data; 1023 hdr = (struct ieee80211_hdr *) skb->data;
1016 1024
1017 tx->sta = sta_info_get(local, hdr->addr1); 1025 tx->sta = sta_info_get(local, hdr->addr1);
1018 1026
1019 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control)) { 1027 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
1028 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
1020 unsigned long flags; 1029 unsigned long flags;
1030 struct tid_ampdu_tx *tid_tx;
1031
1021 qc = ieee80211_get_qos_ctl(hdr); 1032 qc = ieee80211_get_qos_ctl(hdr);
1022 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 1033 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
1023 1034
1024 spin_lock_irqsave(&tx->sta->lock, flags); 1035 spin_lock_irqsave(&tx->sta->lock, flags);
1036 /*
1037 * XXX: This spinlock could be fairly expensive, but see the
1038 * comment in agg-tx.c:ieee80211_agg_tx_operational().
1039 * One way to solve this would be to do something RCU-like
1040 * for managing the tid_tx struct and using atomic bitops
1041 * for the actual state -- by introducing an actual
1042 * 'operational' bit that would be possible. It would
1043 * require changing ieee80211_agg_tx_operational() to
1044 * set that bit, and changing the way tid_tx is managed
1045 * everywhere, including races between that bit and
1046 * tid_tx going away (tid_tx being added can be easily
1047 * committed to memory before the 'operational' bit).
1048 */
1049 tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
1025 state = &tx->sta->ampdu_mlme.tid_state_tx[tid]; 1050 state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
1026 if (*state == HT_AGG_STATE_OPERATIONAL) { 1051 if (*state == HT_AGG_STATE_OPERATIONAL) {
1027 info->flags |= IEEE80211_TX_CTL_AMPDU; 1052 info->flags |= IEEE80211_TX_CTL_AMPDU;
1028 if (local->hw.ampdu_queues) 1053 } else if (*state != HT_AGG_STATE_IDLE) {
1029 skb_set_queue_mapping( 1054 /* in progress */
1030 skb, tx->local->hw.queues + 1055 queued = true;
1031 tx->sta->tid_to_tx_q[tid]); 1056 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1057 __skb_queue_tail(&tid_tx->pending, skb);
1032 } 1058 }
1033 spin_unlock_irqrestore(&tx->sta->lock, flags); 1059 spin_unlock_irqrestore(&tx->sta->lock, flags);
1060
1061 if (unlikely(queued))
1062 return TX_QUEUED;
1034 } 1063 }
1035 1064
1036 if (is_multicast_ether_addr(hdr->addr1)) { 1065 if (is_multicast_ether_addr(hdr->addr1)) {
@@ -1081,51 +1110,55 @@ static int ieee80211_tx_prepare(struct ieee80211_local *local,
1081 } 1110 }
1082 if (unlikely(!dev)) 1111 if (unlikely(!dev))
1083 return -ENODEV; 1112 return -ENODEV;
1084 /* initialises tx with control */ 1113 /*
1114 * initialises tx with control
1115 *
1116 * return value is safe to ignore here because this function
1117 * can only be invoked for multicast frames
1118 *
1119 * XXX: clean up
1120 */
1085 __ieee80211_tx_prepare(tx, skb, dev); 1121 __ieee80211_tx_prepare(tx, skb, dev);
1086 dev_put(dev); 1122 dev_put(dev);
1087 return 0; 1123 return 0;
1088} 1124}
1089 1125
1090static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, 1126static int __ieee80211_tx(struct ieee80211_local *local,
1091 struct ieee80211_tx_data *tx) 1127 struct sk_buff **skbp,
1128 struct sta_info *sta)
1092{ 1129{
1130 struct sk_buff *skb = *skbp, *next;
1093 struct ieee80211_tx_info *info; 1131 struct ieee80211_tx_info *info;
1094 int ret, i; 1132 int ret, len;
1133 bool fragm = false;
1095 1134
1096 if (skb) { 1135 local->mdev->trans_start = jiffies;
1136
1137 while (skb) {
1097 if (ieee80211_queue_stopped(&local->hw, 1138 if (ieee80211_queue_stopped(&local->hw,
1098 skb_get_queue_mapping(skb))) 1139 skb_get_queue_mapping(skb)))
1099 return IEEE80211_TX_PENDING; 1140 return IEEE80211_TX_PENDING;
1100 1141
1101 ret = local->ops->tx(local_to_hw(local), skb); 1142 info = IEEE80211_SKB_CB(skb);
1102 if (ret) 1143
1103 return IEEE80211_TX_AGAIN; 1144 if (fragm)
1104 local->mdev->trans_start = jiffies;
1105 ieee80211_led_tx(local, 1);
1106 }
1107 if (tx->extra_frag) {
1108 for (i = 0; i < tx->num_extra_frag; i++) {
1109 if (!tx->extra_frag[i])
1110 continue;
1111 info = IEEE80211_SKB_CB(tx->extra_frag[i]);
1112 info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | 1145 info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
1113 IEEE80211_TX_CTL_FIRST_FRAGMENT); 1146 IEEE80211_TX_CTL_FIRST_FRAGMENT);
1114 if (ieee80211_queue_stopped(&local->hw, 1147
1115 skb_get_queue_mapping(tx->extra_frag[i]))) 1148 next = skb->next;
1116 return IEEE80211_TX_FRAG_AGAIN; 1149 len = skb->len;
1117 1150 ret = local->ops->tx(local_to_hw(local), skb);
1118 ret = local->ops->tx(local_to_hw(local), 1151 if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
1119 tx->extra_frag[i]); 1152 dev_kfree_skb(skb);
1120 if (ret) 1153 ret = NETDEV_TX_OK;
1121 return IEEE80211_TX_FRAG_AGAIN;
1122 local->mdev->trans_start = jiffies;
1123 ieee80211_led_tx(local, 1);
1124 tx->extra_frag[i] = NULL;
1125 } 1154 }
1126 kfree(tx->extra_frag); 1155 if (ret != NETDEV_TX_OK)
1127 tx->extra_frag = NULL; 1156 return IEEE80211_TX_AGAIN;
1157 *skbp = skb = next;
1158 ieee80211_led_tx(local, 1);
1159 fragm = true;
1128 } 1160 }
1161
1129 return IEEE80211_TX_OK; 1162 return IEEE80211_TX_OK;
1130} 1163}
1131 1164
@@ -1137,7 +1170,6 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1137{ 1170{
1138 struct sk_buff *skb = tx->skb; 1171 struct sk_buff *skb = tx->skb;
1139 ieee80211_tx_result res = TX_DROP; 1172 ieee80211_tx_result res = TX_DROP;
1140 int i;
1141 1173
1142#define CALL_TXH(txh) \ 1174#define CALL_TXH(txh) \
1143 res = txh(tx); \ 1175 res = txh(tx); \
@@ -1161,11 +1193,13 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1161 txh_done: 1193 txh_done:
1162 if (unlikely(res == TX_DROP)) { 1194 if (unlikely(res == TX_DROP)) {
1163 I802_DEBUG_INC(tx->local->tx_handlers_drop); 1195 I802_DEBUG_INC(tx->local->tx_handlers_drop);
1164 dev_kfree_skb(skb); 1196 while (skb) {
1165 for (i = 0; i < tx->num_extra_frag; i++) 1197 struct sk_buff *next;
1166 if (tx->extra_frag[i]) 1198
1167 dev_kfree_skb(tx->extra_frag[i]); 1199 next = skb->next;
1168 kfree(tx->extra_frag); 1200 dev_kfree_skb(skb);
1201 skb = next;
1202 }
1169 return -1; 1203 return -1;
1170 } else if (unlikely(res == TX_QUEUED)) { 1204 } else if (unlikely(res == TX_QUEUED)) {
1171 I802_DEBUG_INC(tx->local->tx_handlers_queued); 1205 I802_DEBUG_INC(tx->local->tx_handlers_queued);
@@ -1175,23 +1209,26 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1175 return 0; 1209 return 0;
1176} 1210}
1177 1211
1178static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) 1212static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1213 bool txpending)
1179{ 1214{
1180 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1215 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1181 struct sta_info *sta; 1216 struct sta_info *sta;
1182 struct ieee80211_tx_data tx; 1217 struct ieee80211_tx_data tx;
1183 ieee80211_tx_result res_prepare; 1218 ieee80211_tx_result res_prepare;
1184 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1219 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1185 int ret, i; 1220 struct sk_buff *next;
1221 unsigned long flags;
1222 int ret, retries;
1186 u16 queue; 1223 u16 queue;
1187 1224
1188 queue = skb_get_queue_mapping(skb); 1225 queue = skb_get_queue_mapping(skb);
1189 1226
1190 WARN_ON(test_bit(queue, local->queues_pending)); 1227 WARN_ON(!txpending && !skb_queue_empty(&local->pending[queue]));
1191 1228
1192 if (unlikely(skb->len < 10)) { 1229 if (unlikely(skb->len < 10)) {
1193 dev_kfree_skb(skb); 1230 dev_kfree_skb(skb);
1194 return 0; 1231 return;
1195 } 1232 }
1196 1233
1197 rcu_read_lock(); 1234 rcu_read_lock();
@@ -1199,10 +1236,13 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
1199 /* initialises tx */ 1236 /* initialises tx */
1200 res_prepare = __ieee80211_tx_prepare(&tx, skb, dev); 1237 res_prepare = __ieee80211_tx_prepare(&tx, skb, dev);
1201 1238
1202 if (res_prepare == TX_DROP) { 1239 if (unlikely(res_prepare == TX_DROP)) {
1203 dev_kfree_skb(skb); 1240 dev_kfree_skb(skb);
1204 rcu_read_unlock(); 1241 rcu_read_unlock();
1205 return 0; 1242 return;
1243 } else if (unlikely(res_prepare == TX_QUEUED)) {
1244 rcu_read_unlock();
1245 return;
1206 } 1246 }
1207 1247
1208 sta = tx.sta; 1248 sta = tx.sta;
@@ -1212,59 +1252,71 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
1212 if (invoke_tx_handlers(&tx)) 1252 if (invoke_tx_handlers(&tx))
1213 goto out; 1253 goto out;
1214 1254
1215retry: 1255 retries = 0;
1216 ret = __ieee80211_tx(local, skb, &tx); 1256 retry:
1217 if (ret) { 1257 ret = __ieee80211_tx(local, &tx.skb, tx.sta);
1218 struct ieee80211_tx_stored_packet *store; 1258 switch (ret) {
1219 1259 case IEEE80211_TX_OK:
1260 break;
1261 case IEEE80211_TX_AGAIN:
1220 /* 1262 /*
1221 * Since there are no fragmented frames on A-MPDU 1263 * Since there are no fragmented frames on A-MPDU
1222 * queues, there's no reason for a driver to reject 1264 * queues, there's no reason for a driver to reject
1223 * a frame there, warn and drop it. 1265 * a frame there, warn and drop it.
1224 */ 1266 */
1225 if (ret != IEEE80211_TX_PENDING) 1267 if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
1226 if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU)) 1268 goto drop;
1227 goto drop; 1269 /* fall through */
1270 case IEEE80211_TX_PENDING:
1271 skb = tx.skb;
1272
1273 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
1274
1275 if (__netif_subqueue_stopped(local->mdev, queue)) {
1276 do {
1277 next = skb->next;
1278 skb->next = NULL;
1279 if (unlikely(txpending))
1280 skb_queue_head(&local->pending[queue],
1281 skb);
1282 else
1283 skb_queue_tail(&local->pending[queue],
1284 skb);
1285 } while ((skb = next));
1228 1286
1229 store = &local->pending_packet[queue]; 1287 /*
1288 * Make sure nobody will enable the queue on us
1289 * (without going through the tasklet) nor disable the
1290 * netdev queue underneath the pending handling code.
1291 */
1292 __set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING,
1293 &local->queue_stop_reasons[queue]);
1230 1294
1231 if (ret == IEEE80211_TX_FRAG_AGAIN) 1295 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1232 skb = NULL; 1296 flags);
1297 } else {
1298 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1299 flags);
1233 1300
1234 set_bit(queue, local->queues_pending); 1301 retries++;
1235 smp_mb(); 1302 if (WARN(retries > 10, "tx refused but queue active"))
1236 /* 1303 goto drop;
1237 * When the driver gets out of buffers during sending of
1238 * fragments and calls ieee80211_stop_queue, the netif
1239 * subqueue is stopped. There is, however, a small window
1240 * in which the PENDING bit is not yet set. If a buffer
1241 * gets available in that window (i.e. driver calls
1242 * ieee80211_wake_queue), we would end up with ieee80211_tx
1243 * called with the PENDING bit still set. Prevent this by
1244 * continuing transmitting here when that situation is
1245 * possible to have happened.
1246 */
1247 if (!__netif_subqueue_stopped(local->mdev, queue)) {
1248 clear_bit(queue, local->queues_pending);
1249 goto retry; 1304 goto retry;
1250 } 1305 }
1251 store->skb = skb;
1252 store->extra_frag = tx.extra_frag;
1253 store->num_extra_frag = tx.num_extra_frag;
1254 } 1306 }
1255 out: 1307 out:
1256 rcu_read_unlock(); 1308 rcu_read_unlock();
1257 return 0; 1309 return;
1258 1310
1259 drop: 1311 drop:
1260 if (skb)
1261 dev_kfree_skb(skb);
1262 for (i = 0; i < tx.num_extra_frag; i++)
1263 if (tx.extra_frag[i])
1264 dev_kfree_skb(tx.extra_frag[i]);
1265 kfree(tx.extra_frag);
1266 rcu_read_unlock(); 1312 rcu_read_unlock();
1267 return 0; 1313
1314 skb = tx.skb;
1315 while (skb) {
1316 next = skb->next;
1317 dev_kfree_skb(skb);
1318 skb = next;
1319 }
1268} 1320}
1269 1321
1270/* device xmit handlers */ 1322/* device xmit handlers */
@@ -1323,7 +1375,6 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
1323 FOUND_SDATA, 1375 FOUND_SDATA,
1324 UNKNOWN_ADDRESS, 1376 UNKNOWN_ADDRESS,
1325 } monitor_iface = NOT_MONITOR; 1377 } monitor_iface = NOT_MONITOR;
1326 int ret;
1327 1378
1328 if (skb->iif) 1379 if (skb->iif)
1329 odev = dev_get_by_index(&init_net, skb->iif); 1380 odev = dev_get_by_index(&init_net, skb->iif);
@@ -1337,7 +1388,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
1337 "originating device\n", dev->name); 1388 "originating device\n", dev->name);
1338#endif 1389#endif
1339 dev_kfree_skb(skb); 1390 dev_kfree_skb(skb);
1340 return 0; 1391 return NETDEV_TX_OK;
1341 } 1392 }
1342 1393
1343 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && 1394 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
@@ -1366,7 +1417,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
1366 else 1417 else
1367 if (mesh_nexthop_lookup(skb, osdata)) { 1418 if (mesh_nexthop_lookup(skb, osdata)) {
1368 dev_put(odev); 1419 dev_put(odev);
1369 return 0; 1420 return NETDEV_TX_OK;
1370 } 1421 }
1371 if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0) 1422 if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
1372 IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh, 1423 IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
@@ -1428,7 +1479,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
1428 if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) { 1479 if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) {
1429 dev_kfree_skb(skb); 1480 dev_kfree_skb(skb);
1430 dev_put(odev); 1481 dev_put(odev);
1431 return 0; 1482 return NETDEV_TX_OK;
1432 } 1483 }
1433 1484
1434 if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1485 if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -1437,10 +1488,11 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
1437 u.ap); 1488 u.ap);
1438 if (likely(monitor_iface != UNKNOWN_ADDRESS)) 1489 if (likely(monitor_iface != UNKNOWN_ADDRESS))
1439 info->control.vif = &osdata->vif; 1490 info->control.vif = &osdata->vif;
1440 ret = ieee80211_tx(odev, skb); 1491
1492 ieee80211_tx(odev, skb, false);
1441 dev_put(odev); 1493 dev_put(odev);
1442 1494
1443 return ret; 1495 return NETDEV_TX_OK;
1444} 1496}
1445 1497
1446int ieee80211_monitor_start_xmit(struct sk_buff *skb, 1498int ieee80211_monitor_start_xmit(struct sk_buff *skb,
@@ -1666,8 +1718,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1666 } 1718 }
1667 1719
1668 /* receiver and we are QoS enabled, use a QoS type frame */ 1720 /* receiver and we are QoS enabled, use a QoS type frame */
1669 if (sta_flags & WLAN_STA_WME && 1721 if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) {
1670 ieee80211_num_regular_queues(&local->hw) >= 4) {
1671 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 1722 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1672 hdrlen += 2; 1723 hdrlen += 2;
1673 } 1724 }
@@ -1799,19 +1850,58 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1799 */ 1850 */
1800void ieee80211_clear_tx_pending(struct ieee80211_local *local) 1851void ieee80211_clear_tx_pending(struct ieee80211_local *local)
1801{ 1852{
1802 int i, j; 1853 int i;
1803 struct ieee80211_tx_stored_packet *store;
1804 1854
1805 for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) { 1855 for (i = 0; i < local->hw.queues; i++)
1806 if (!test_bit(i, local->queues_pending)) 1856 skb_queue_purge(&local->pending[i]);
1807 continue; 1857}
1808 store = &local->pending_packet[i]; 1858
1809 kfree_skb(store->skb); 1859static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
1810 for (j = 0; j < store->num_extra_frag; j++) 1860 struct sk_buff *skb)
1811 kfree_skb(store->extra_frag[j]); 1861{
1812 kfree(store->extra_frag); 1862 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1813 clear_bit(i, local->queues_pending); 1863 struct ieee80211_sub_if_data *sdata;
1864 struct sta_info *sta;
1865 struct ieee80211_hdr *hdr;
1866 struct net_device *dev;
1867 int ret;
1868 bool result = true;
1869
1870 /* does interface still exist? */
1871 dev = dev_get_by_index(&init_net, skb->iif);
1872 if (!dev) {
1873 dev_kfree_skb(skb);
1874 return true;
1814 } 1875 }
1876
1877 /* validate info->control.vif against skb->iif */
1878 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1879 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1880 sdata = container_of(sdata->bss,
1881 struct ieee80211_sub_if_data,
1882 u.ap);
1883
1884 if (unlikely(info->control.vif && info->control.vif != &sdata->vif)) {
1885 dev_kfree_skb(skb);
1886 result = true;
1887 goto out;
1888 }
1889
1890 if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
1891 ieee80211_tx(dev, skb, true);
1892 } else {
1893 hdr = (struct ieee80211_hdr *)skb->data;
1894 sta = sta_info_get(local, hdr->addr1);
1895
1896 ret = __ieee80211_tx(local, &skb, sta);
1897 if (ret != IEEE80211_TX_OK)
1898 result = false;
1899 }
1900
1901 out:
1902 dev_put(dev);
1903
1904 return result;
1815} 1905}
1816 1906
1817/* 1907/*
@@ -1822,40 +1912,53 @@ void ieee80211_tx_pending(unsigned long data)
1822{ 1912{
1823 struct ieee80211_local *local = (struct ieee80211_local *)data; 1913 struct ieee80211_local *local = (struct ieee80211_local *)data;
1824 struct net_device *dev = local->mdev; 1914 struct net_device *dev = local->mdev;
1825 struct ieee80211_tx_stored_packet *store; 1915 unsigned long flags;
1826 struct ieee80211_tx_data tx; 1916 int i;
1827 int i, ret; 1917 bool next;
1828 1918
1919 rcu_read_lock();
1829 netif_tx_lock_bh(dev); 1920 netif_tx_lock_bh(dev);
1830 for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
1831 /* Check that this queue is ok */
1832 if (__netif_subqueue_stopped(local->mdev, i) &&
1833 !test_bit(i, local->queues_pending_run))
1834 continue;
1835 1921
1836 if (!test_bit(i, local->queues_pending)) { 1922 for (i = 0; i < local->hw.queues; i++) {
1837 clear_bit(i, local->queues_pending_run); 1923 /*
1838 ieee80211_wake_queue(&local->hw, i); 1924 * If queue is stopped by something other than due to pending
1925 * frames, or we have no pending frames, proceed to next queue.
1926 */
1927 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
1928 next = false;
1929 if (local->queue_stop_reasons[i] !=
1930 BIT(IEEE80211_QUEUE_STOP_REASON_PENDING) ||
1931 skb_queue_empty(&local->pending[i]))
1932 next = true;
1933 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1934
1935 if (next)
1839 continue; 1936 continue;
1840 }
1841 1937
1842 clear_bit(i, local->queues_pending_run); 1938 /*
1939 * start the queue now to allow processing our packets,
1940 * we're under the tx lock here anyway so nothing will
1941 * happen as a result of this
1942 */
1843 netif_start_subqueue(local->mdev, i); 1943 netif_start_subqueue(local->mdev, i);
1844 1944
1845 store = &local->pending_packet[i]; 1945 while (!skb_queue_empty(&local->pending[i])) {
1846 tx.extra_frag = store->extra_frag; 1946 struct sk_buff *skb = skb_dequeue(&local->pending[i]);
1847 tx.num_extra_frag = store->num_extra_frag; 1947
1848 tx.flags = 0; 1948 if (!ieee80211_tx_pending_skb(local, skb)) {
1849 ret = __ieee80211_tx(local, store->skb, &tx); 1949 skb_queue_head(&local->pending[i], skb);
1850 if (ret) { 1950 break;
1851 if (ret == IEEE80211_TX_FRAG_AGAIN) 1951 }
1852 store->skb = NULL;
1853 } else {
1854 clear_bit(i, local->queues_pending);
1855 ieee80211_wake_queue(&local->hw, i);
1856 } 1952 }
1953
1954 /* Start regular packet processing again. */
1955 if (skb_queue_empty(&local->pending[i]))
1956 ieee80211_wake_queue_by_reason(&local->hw, i,
1957 IEEE80211_QUEUE_STOP_REASON_PENDING);
1857 } 1958 }
1959
1858 netif_tx_unlock_bh(dev); 1960 netif_tx_unlock_bh(dev);
1961 rcu_read_unlock();
1859} 1962}
1860 1963
1861/* functions for drivers to get certain frames */ 1964/* functions for drivers to get certain frames */
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index e0431a1d218b..fdf432f14554 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -166,18 +166,13 @@ int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
166 166
167void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) 167void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
168{ 168{
169 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; 169 struct sk_buff *skb = tx->skb;
170 170 struct ieee80211_hdr *hdr;
171 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); 171
172 if (tx->extra_frag) { 172 do {
173 struct ieee80211_hdr *fhdr; 173 hdr = (struct ieee80211_hdr *) skb->data;
174 int i; 174 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
175 for (i = 0; i < tx->num_extra_frag; i++) { 175 } while ((skb = skb->next));
176 fhdr = (struct ieee80211_hdr *)
177 tx->extra_frag[i]->data;
178 fhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
179 }
180 }
181} 176}
182 177
183int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, 178int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
@@ -344,42 +339,21 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
344{ 339{
345 struct ieee80211_local *local = hw_to_local(hw); 340 struct ieee80211_local *local = hw_to_local(hw);
346 341
347 if (queue >= hw->queues) { 342 if (WARN_ON(queue >= hw->queues))
348 if (local->ampdu_ac_queue[queue - hw->queues] < 0) 343 return;
349 return;
350
351 /*
352 * for virtual aggregation queues, we need to refcount the
353 * internal mac80211 disable (multiple times!), keep track of
354 * driver disable _and_ make sure the regular queue is
355 * actually enabled.
356 */
357 if (reason == IEEE80211_QUEUE_STOP_REASON_AGGREGATION)
358 local->amdpu_ac_stop_refcnt[queue - hw->queues]--;
359 else
360 __clear_bit(reason, &local->queue_stop_reasons[queue]);
361
362 if (local->queue_stop_reasons[queue] ||
363 local->amdpu_ac_stop_refcnt[queue - hw->queues])
364 return;
365
366 /* now go on to treat the corresponding regular queue */
367 queue = local->ampdu_ac_queue[queue - hw->queues];
368 reason = IEEE80211_QUEUE_STOP_REASON_AGGREGATION;
369 }
370 344
371 __clear_bit(reason, &local->queue_stop_reasons[queue]); 345 __clear_bit(reason, &local->queue_stop_reasons[queue]);
372 346
347 if (!skb_queue_empty(&local->pending[queue]) &&
348 local->queue_stop_reasons[queue] ==
349 BIT(IEEE80211_QUEUE_STOP_REASON_PENDING))
350 tasklet_schedule(&local->tx_pending_tasklet);
351
373 if (local->queue_stop_reasons[queue] != 0) 352 if (local->queue_stop_reasons[queue] != 0)
374 /* someone still has this queue stopped */ 353 /* someone still has this queue stopped */
375 return; 354 return;
376 355
377 if (test_bit(queue, local->queues_pending)) { 356 netif_wake_subqueue(local->mdev, queue);
378 set_bit(queue, local->queues_pending_run);
379 tasklet_schedule(&local->tx_pending_tasklet);
380 } else {
381 netif_wake_subqueue(local->mdev, queue);
382 }
383} 357}
384 358
385void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, 359void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -405,29 +379,18 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
405{ 379{
406 struct ieee80211_local *local = hw_to_local(hw); 380 struct ieee80211_local *local = hw_to_local(hw);
407 381
408 if (queue >= hw->queues) { 382 if (WARN_ON(queue >= hw->queues))
409 if (local->ampdu_ac_queue[queue - hw->queues] < 0) 383 return;
410 return;
411
412 /*
413 * for virtual aggregation queues, we need to refcount the
414 * internal mac80211 disable (multiple times!), keep track of
415 * driver disable _and_ make sure the regular queue is
416 * actually enabled.
417 */
418 if (reason == IEEE80211_QUEUE_STOP_REASON_AGGREGATION)
419 local->amdpu_ac_stop_refcnt[queue - hw->queues]++;
420 else
421 __set_bit(reason, &local->queue_stop_reasons[queue]);
422 384
423 /* now go on to treat the corresponding regular queue */ 385 /*
424 queue = local->ampdu_ac_queue[queue - hw->queues]; 386 * Only stop if it was previously running, this is necessary
425 reason = IEEE80211_QUEUE_STOP_REASON_AGGREGATION; 387 * for correct pending packets handling because there we may
426 } 388 * start (but not wake) the queue and rely on that.
389 */
390 if (!local->queue_stop_reasons[queue])
391 netif_stop_subqueue(local->mdev, queue);
427 392
428 __set_bit(reason, &local->queue_stop_reasons[queue]); 393 __set_bit(reason, &local->queue_stop_reasons[queue]);
429
430 netif_stop_subqueue(local->mdev, queue);
431} 394}
432 395
433void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, 396void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -473,15 +436,9 @@ EXPORT_SYMBOL(ieee80211_stop_queues);
473int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue) 436int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue)
474{ 437{
475 struct ieee80211_local *local = hw_to_local(hw); 438 struct ieee80211_local *local = hw_to_local(hw);
476 unsigned long flags;
477 439
478 if (queue >= hw->queues) { 440 if (WARN_ON(queue >= hw->queues))
479 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 441 return true;
480 queue = local->ampdu_ac_queue[queue - hw->queues];
481 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
482 if (queue < 0)
483 return true;
484 }
485 442
486 return __netif_subqueue_stopped(local->mdev, queue); 443 return __netif_subqueue_stopped(local->mdev, queue);
487} 444}
@@ -496,7 +453,7 @@ void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
496 453
497 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 454 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
498 455
499 for (i = 0; i < hw->queues + hw->ampdu_queues; i++) 456 for (i = 0; i < hw->queues; i++)
500 __ieee80211_wake_queue(hw, i, reason); 457 __ieee80211_wake_queue(hw, i, reason);
501 458
502 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 459 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
@@ -846,16 +803,9 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
846 struct ieee80211_local *local = sdata->local; 803 struct ieee80211_local *local = sdata->local;
847 struct sk_buff *skb; 804 struct sk_buff *skb;
848 struct ieee80211_mgmt *mgmt; 805 struct ieee80211_mgmt *mgmt;
849 const u8 *ie_auth = NULL;
850 int ie_auth_len = 0;
851
852 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
853 ie_auth_len = sdata->u.mgd.ie_auth_len;
854 ie_auth = sdata->u.mgd.ie_auth;
855 }
856 806
857 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 807 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
858 sizeof(*mgmt) + 6 + extra_len + ie_auth_len); 808 sizeof(*mgmt) + 6 + extra_len);
859 if (!skb) { 809 if (!skb) {
860 printk(KERN_DEBUG "%s: failed to allocate buffer for auth " 810 printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
861 "frame\n", sdata->dev->name); 811 "frame\n", sdata->dev->name);
@@ -877,8 +827,6 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
877 mgmt->u.auth.status_code = cpu_to_le16(0); 827 mgmt->u.auth.status_code = cpu_to_le16(0);
878 if (extra) 828 if (extra)
879 memcpy(skb_put(skb, extra_len), extra, extra_len); 829 memcpy(skb_put(skb, extra_len), extra, extra_len);
880 if (ie_auth)
881 memcpy(skb_put(skb, ie_auth_len), ie_auth, ie_auth_len);
882 830
883 ieee80211_tx_skb(sdata, skb, encrypt); 831 ieee80211_tx_skb(sdata, skb, encrypt);
884} 832}
@@ -891,20 +839,11 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
891 struct ieee80211_supported_band *sband; 839 struct ieee80211_supported_band *sband;
892 struct sk_buff *skb; 840 struct sk_buff *skb;
893 struct ieee80211_mgmt *mgmt; 841 struct ieee80211_mgmt *mgmt;
894 u8 *pos, *supp_rates, *esupp_rates = NULL, *extra_preq_ie = NULL; 842 u8 *pos, *supp_rates, *esupp_rates = NULL;
895 int i, extra_preq_ie_len = 0; 843 int i;
896
897 switch (sdata->vif.type) {
898 case NL80211_IFTYPE_STATION:
899 extra_preq_ie_len = sdata->u.mgd.ie_probereq_len;
900 extra_preq_ie = sdata->u.mgd.ie_probereq;
901 break;
902 default:
903 break;
904 }
905 844
906 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 + 845 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 +
907 ie_len + extra_preq_ie_len); 846 ie_len);
908 if (!skb) { 847 if (!skb) {
909 printk(KERN_DEBUG "%s: failed to allocate buffer for probe " 848 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
910 "request\n", sdata->dev->name); 849 "request\n", sdata->dev->name);
@@ -953,9 +892,6 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
953 892
954 if (ie) 893 if (ie)
955 memcpy(skb_put(skb, ie_len), ie, ie_len); 894 memcpy(skb_put(skb, ie_len), ie, ie_len);
956 if (extra_preq_ie)
957 memcpy(skb_put(skb, extra_preq_ie_len), extra_preq_ie,
958 extra_preq_ie_len);
959 895
960 ieee80211_tx_skb(sdata, skb, 0); 896 ieee80211_tx_skb(sdata, skb, 0);
961} 897}
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 7043ddc75498..ef73105b3061 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -329,24 +329,17 @@ static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
329ieee80211_tx_result 329ieee80211_tx_result
330ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx) 330ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx)
331{ 331{
332 int i; 332 struct sk_buff *skb;
333 333
334 ieee80211_tx_set_protected(tx); 334 ieee80211_tx_set_protected(tx);
335 335
336 if (wep_encrypt_skb(tx, tx->skb) < 0) { 336 skb = tx->skb;
337 I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); 337 do {
338 return TX_DROP; 338 if (wep_encrypt_skb(tx, skb) < 0) {
339 } 339 I802_DEBUG_INC(tx->local->tx_handlers_drop_wep);
340 340 return TX_DROP;
341 if (tx->extra_frag) {
342 for (i = 0; i < tx->num_extra_frag; i++) {
343 if (wep_encrypt_skb(tx, tx->extra_frag[i])) {
344 I802_DEBUG_INC(tx->local->
345 tx_handlers_drop_wep);
346 return TX_DROP;
347 }
348 } 341 }
349 } 342 } while ((skb = skb->next));
350 343
351 return TX_CONTINUE; 344 return TX_CONTINUE;
352} 345}
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index 935c63ed3dfa..deb4ecec122a 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -129,14 +129,12 @@ static int ieee80211_ioctl_siwgenie(struct net_device *dev,
129 129
130 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 130 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
131 131
132 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)
133 return -EOPNOTSUPP;
134
135 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 132 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
136 int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length); 133 int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length);
137 if (ret) 134 if (ret)
138 return ret; 135 return ret;
139 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; 136 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
137 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
140 ieee80211_sta_req_auth(sdata); 138 ieee80211_sta_req_auth(sdata);
141 return 0; 139 return 0;
142 } 140 }
@@ -207,14 +205,6 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev,
207 205
208 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 206 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
209 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 207 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
210 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) {
211 if (len > IEEE80211_MAX_SSID_LEN)
212 return -EINVAL;
213 memcpy(sdata->u.mgd.ssid, ssid, len);
214 sdata->u.mgd.ssid_len = len;
215 return 0;
216 }
217
218 if (data->flags) 208 if (data->flags)
219 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; 209 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
220 else 210 else
@@ -224,6 +214,7 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev,
224 if (ret) 214 if (ret)
225 return ret; 215 return ret;
226 216
217 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
227 ieee80211_sta_req_auth(sdata); 218 ieee80211_sta_req_auth(sdata);
228 return 0; 219 return 0;
229 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 220 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
@@ -272,11 +263,7 @@ static int ieee80211_ioctl_siwap(struct net_device *dev,
272 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 263 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
273 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 264 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
274 int ret; 265 int ret;
275 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) { 266
276 memcpy(sdata->u.mgd.bssid, (u8 *) &ap_addr->sa_data,
277 ETH_ALEN);
278 return 0;
279 }
280 if (is_zero_ether_addr((u8 *) &ap_addr->sa_data)) 267 if (is_zero_ether_addr((u8 *) &ap_addr->sa_data))
281 sdata->u.mgd.flags |= IEEE80211_STA_AUTO_BSSID_SEL | 268 sdata->u.mgd.flags |= IEEE80211_STA_AUTO_BSSID_SEL |
282 IEEE80211_STA_AUTO_CHANNEL_SEL; 269 IEEE80211_STA_AUTO_CHANNEL_SEL;
@@ -287,6 +274,7 @@ static int ieee80211_ioctl_siwap(struct net_device *dev,
287 ret = ieee80211_sta_set_bssid(sdata, (u8 *) &ap_addr->sa_data); 274 ret = ieee80211_sta_set_bssid(sdata, (u8 *) &ap_addr->sa_data);
288 if (ret) 275 if (ret)
289 return ret; 276 return ret;
277 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
290 ieee80211_sta_req_auth(sdata); 278 ieee80211_sta_req_auth(sdata);
291 return 0; 279 return 0;
292 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { 280 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
@@ -630,7 +618,7 @@ static int ieee80211_ioctl_siwencode(struct net_device *dev,
630 struct ieee80211_sub_if_data *sdata; 618 struct ieee80211_sub_if_data *sdata;
631 int idx, i, alg = ALG_WEP; 619 int idx, i, alg = ALG_WEP;
632 u8 bcaddr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 620 u8 bcaddr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
633 int remove = 0; 621 int remove = 0, ret;
634 622
635 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 623 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
636 624
@@ -656,11 +644,20 @@ static int ieee80211_ioctl_siwencode(struct net_device *dev,
656 return 0; 644 return 0;
657 } 645 }
658 646
659 return ieee80211_set_encryption( 647 ret = ieee80211_set_encryption(
660 sdata, bcaddr, 648 sdata, bcaddr,
661 idx, alg, remove, 649 idx, alg, remove,
662 !sdata->default_key, 650 !sdata->default_key,
663 keybuf, erq->length); 651 keybuf, erq->length);
652
653 if (!ret) {
654 if (remove)
655 sdata->u.mgd.flags &= ~IEEE80211_STA_TKIP_WEP_USED;
656 else
657 sdata->u.mgd.flags |= IEEE80211_STA_TKIP_WEP_USED;
658 }
659
660 return ret;
664} 661}
665 662
666 663
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 9101b48ec2ae..4f8bfea278f2 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -196,19 +196,13 @@ ieee80211_tx_result
196ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) 196ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx)
197{ 197{
198 struct sk_buff *skb = tx->skb; 198 struct sk_buff *skb = tx->skb;
199 int i;
200 199
201 ieee80211_tx_set_protected(tx); 200 ieee80211_tx_set_protected(tx);
202 201
203 if (tkip_encrypt_skb(tx, skb) < 0) 202 do {
204 return TX_DROP; 203 if (tkip_encrypt_skb(tx, skb) < 0)
205 204 return TX_DROP;
206 if (tx->extra_frag) { 205 } while ((skb = skb->next));
207 for (i = 0; i < tx->num_extra_frag; i++) {
208 if (tkip_encrypt_skb(tx, tx->extra_frag[i]))
209 return TX_DROP;
210 }
211 }
212 206
213 return TX_CONTINUE; 207 return TX_CONTINUE;
214} 208}
@@ -428,19 +422,13 @@ ieee80211_tx_result
428ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) 422ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
429{ 423{
430 struct sk_buff *skb = tx->skb; 424 struct sk_buff *skb = tx->skb;
431 int i;
432 425
433 ieee80211_tx_set_protected(tx); 426 ieee80211_tx_set_protected(tx);
434 427
435 if (ccmp_encrypt_skb(tx, skb) < 0) 428 do {
436 return TX_DROP; 429 if (ccmp_encrypt_skb(tx, skb) < 0)
437 430 return TX_DROP;
438 if (tx->extra_frag) { 431 } while ((skb = skb->next));
439 for (i = 0; i < tx->num_extra_frag; i++) {
440 if (ccmp_encrypt_skb(tx, tx->extra_frag[i]))
441 return TX_DROP;
442 }
443 }
444 432
445 return TX_CONTINUE; 433 return TX_CONTINUE;
446} 434}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 2562d05dbaf5..2c967e4f706c 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -374,7 +374,7 @@ config NETFILTER_XT_TARGET_HL
374 374
375config NETFILTER_XT_TARGET_LED 375config NETFILTER_XT_TARGET_LED
376 tristate '"LED" target support' 376 tristate '"LED" target support'
377 depends on LEDS_CLASS 377 depends on LEDS_CLASS && LED_TRIGGERS
378 depends on NETFILTER_ADVANCED 378 depends on NETFILTER_ADVANCED
379 help 379 help
380 This option adds a `LED' target, which allows you to blink LEDs in 380 This option adds a `LED' target, which allows you to blink LEDs in
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index dfb447b584da..8020db6274b8 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -29,6 +29,7 @@
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30#include <linux/socket.h> 30#include <linux/socket.h>
31#include <linux/mm.h> 31#include <linux/mm.h>
32#include <linux/rculist_nulls.h>
32 33
33#include <net/netfilter/nf_conntrack.h> 34#include <net/netfilter/nf_conntrack.h>
34#include <net/netfilter/nf_conntrack_l3proto.h> 35#include <net/netfilter/nf_conntrack_l3proto.h>
@@ -163,8 +164,8 @@ static void
163clean_from_lists(struct nf_conn *ct) 164clean_from_lists(struct nf_conn *ct)
164{ 165{
165 pr_debug("clean_from_lists(%p)\n", ct); 166 pr_debug("clean_from_lists(%p)\n", ct);
166 hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode); 167 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
167 hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode); 168 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
168 169
169 /* Destroy all pending expectations */ 170 /* Destroy all pending expectations */
170 nf_ct_remove_expectations(ct); 171 nf_ct_remove_expectations(ct);
@@ -204,8 +205,8 @@ destroy_conntrack(struct nf_conntrack *nfct)
204 205
205 /* We overload first tuple to link into unconfirmed list. */ 206 /* We overload first tuple to link into unconfirmed list. */
206 if (!nf_ct_is_confirmed(ct)) { 207 if (!nf_ct_is_confirmed(ct)) {
207 BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode)); 208 BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
208 hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode); 209 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
209 } 210 }
210 211
211 NF_CT_STAT_INC(net, delete); 212 NF_CT_STAT_INC(net, delete);
@@ -242,18 +243,26 @@ static void death_by_timeout(unsigned long ul_conntrack)
242 nf_ct_put(ct); 243 nf_ct_put(ct);
243} 244}
244 245
246/*
247 * Warning :
248 * - Caller must take a reference on returned object
249 * and recheck nf_ct_tuple_equal(tuple, &h->tuple)
250 * OR
251 * - Caller must lock nf_conntrack_lock before calling this function
252 */
245struct nf_conntrack_tuple_hash * 253struct nf_conntrack_tuple_hash *
246__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple) 254__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
247{ 255{
248 struct nf_conntrack_tuple_hash *h; 256 struct nf_conntrack_tuple_hash *h;
249 struct hlist_node *n; 257 struct hlist_nulls_node *n;
250 unsigned int hash = hash_conntrack(tuple); 258 unsigned int hash = hash_conntrack(tuple);
251 259
252 /* Disable BHs the entire time since we normally need to disable them 260 /* Disable BHs the entire time since we normally need to disable them
253 * at least once for the stats anyway. 261 * at least once for the stats anyway.
254 */ 262 */
255 local_bh_disable(); 263 local_bh_disable();
256 hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) { 264begin:
265 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
257 if (nf_ct_tuple_equal(tuple, &h->tuple)) { 266 if (nf_ct_tuple_equal(tuple, &h->tuple)) {
258 NF_CT_STAT_INC(net, found); 267 NF_CT_STAT_INC(net, found);
259 local_bh_enable(); 268 local_bh_enable();
@@ -261,6 +270,13 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
261 } 270 }
262 NF_CT_STAT_INC(net, searched); 271 NF_CT_STAT_INC(net, searched);
263 } 272 }
273 /*
274 * if the nulls value we got at the end of this lookup is
275 * not the expected one, we must restart lookup.
276 * We probably met an item that was moved to another chain.
277 */
278 if (get_nulls_value(n) != hash)
279 goto begin;
264 local_bh_enable(); 280 local_bh_enable();
265 281
266 return NULL; 282 return NULL;
@@ -275,11 +291,18 @@ nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
275 struct nf_conn *ct; 291 struct nf_conn *ct;
276 292
277 rcu_read_lock(); 293 rcu_read_lock();
294begin:
278 h = __nf_conntrack_find(net, tuple); 295 h = __nf_conntrack_find(net, tuple);
279 if (h) { 296 if (h) {
280 ct = nf_ct_tuplehash_to_ctrack(h); 297 ct = nf_ct_tuplehash_to_ctrack(h);
281 if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use))) 298 if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
282 h = NULL; 299 h = NULL;
300 else {
301 if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
302 nf_ct_put(ct);
303 goto begin;
304 }
305 }
283 } 306 }
284 rcu_read_unlock(); 307 rcu_read_unlock();
285 308
@@ -293,9 +316,9 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
293{ 316{
294 struct net *net = nf_ct_net(ct); 317 struct net *net = nf_ct_net(ct);
295 318
296 hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode, 319 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
297 &net->ct.hash[hash]); 320 &net->ct.hash[hash]);
298 hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode, 321 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
299 &net->ct.hash[repl_hash]); 322 &net->ct.hash[repl_hash]);
300} 323}
301 324
@@ -318,7 +341,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
318 struct nf_conntrack_tuple_hash *h; 341 struct nf_conntrack_tuple_hash *h;
319 struct nf_conn *ct; 342 struct nf_conn *ct;
320 struct nf_conn_help *help; 343 struct nf_conn_help *help;
321 struct hlist_node *n; 344 struct hlist_nulls_node *n;
322 enum ip_conntrack_info ctinfo; 345 enum ip_conntrack_info ctinfo;
323 struct net *net; 346 struct net *net;
324 347
@@ -350,17 +373,17 @@ __nf_conntrack_confirm(struct sk_buff *skb)
350 /* See if there's one in the list already, including reverse: 373 /* See if there's one in the list already, including reverse:
351 NAT could have grabbed it without realizing, since we're 374 NAT could have grabbed it without realizing, since we're
352 not in the hash. If there is, we lost race. */ 375 not in the hash. If there is, we lost race. */
353 hlist_for_each_entry(h, n, &net->ct.hash[hash], hnode) 376 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
354 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 377 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
355 &h->tuple)) 378 &h->tuple))
356 goto out; 379 goto out;
357 hlist_for_each_entry(h, n, &net->ct.hash[repl_hash], hnode) 380 hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
358 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, 381 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
359 &h->tuple)) 382 &h->tuple))
360 goto out; 383 goto out;
361 384
362 /* Remove from unconfirmed list */ 385 /* Remove from unconfirmed list */
363 hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode); 386 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
364 387
365 __nf_conntrack_hash_insert(ct, hash, repl_hash); 388 __nf_conntrack_hash_insert(ct, hash, repl_hash);
366 /* Timer relative to confirmation time, not original 389 /* Timer relative to confirmation time, not original
@@ -399,14 +422,14 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
399{ 422{
400 struct net *net = nf_ct_net(ignored_conntrack); 423 struct net *net = nf_ct_net(ignored_conntrack);
401 struct nf_conntrack_tuple_hash *h; 424 struct nf_conntrack_tuple_hash *h;
402 struct hlist_node *n; 425 struct hlist_nulls_node *n;
403 unsigned int hash = hash_conntrack(tuple); 426 unsigned int hash = hash_conntrack(tuple);
404 427
405 /* Disable BHs the entire time since we need to disable them at 428 /* Disable BHs the entire time since we need to disable them at
406 * least once for the stats anyway. 429 * least once for the stats anyway.
407 */ 430 */
408 rcu_read_lock_bh(); 431 rcu_read_lock_bh();
409 hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) { 432 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
410 if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack && 433 if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
411 nf_ct_tuple_equal(tuple, &h->tuple)) { 434 nf_ct_tuple_equal(tuple, &h->tuple)) {
412 NF_CT_STAT_INC(net, found); 435 NF_CT_STAT_INC(net, found);
@@ -430,14 +453,14 @@ static noinline int early_drop(struct net *net, unsigned int hash)
430 /* Use oldest entry, which is roughly LRU */ 453 /* Use oldest entry, which is roughly LRU */
431 struct nf_conntrack_tuple_hash *h; 454 struct nf_conntrack_tuple_hash *h;
432 struct nf_conn *ct = NULL, *tmp; 455 struct nf_conn *ct = NULL, *tmp;
433 struct hlist_node *n; 456 struct hlist_nulls_node *n;
434 unsigned int i, cnt = 0; 457 unsigned int i, cnt = 0;
435 int dropped = 0; 458 int dropped = 0;
436 459
437 rcu_read_lock(); 460 rcu_read_lock();
438 for (i = 0; i < nf_conntrack_htable_size; i++) { 461 for (i = 0; i < nf_conntrack_htable_size; i++) {
439 hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], 462 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
440 hnode) { 463 hnnode) {
441 tmp = nf_ct_tuplehash_to_ctrack(h); 464 tmp = nf_ct_tuplehash_to_ctrack(h);
442 if (!test_bit(IPS_ASSURED_BIT, &tmp->status)) 465 if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
443 ct = tmp; 466 ct = tmp;
@@ -508,27 +531,19 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
508#ifdef CONFIG_NET_NS 531#ifdef CONFIG_NET_NS
509 ct->ct_net = net; 532 ct->ct_net = net;
510#endif 533#endif
511 INIT_RCU_HEAD(&ct->rcu);
512 534
513 return ct; 535 return ct;
514} 536}
515EXPORT_SYMBOL_GPL(nf_conntrack_alloc); 537EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
516 538
517static void nf_conntrack_free_rcu(struct rcu_head *head)
518{
519 struct nf_conn *ct = container_of(head, struct nf_conn, rcu);
520
521 nf_ct_ext_free(ct);
522 kmem_cache_free(nf_conntrack_cachep, ct);
523}
524
525void nf_conntrack_free(struct nf_conn *ct) 539void nf_conntrack_free(struct nf_conn *ct)
526{ 540{
527 struct net *net = nf_ct_net(ct); 541 struct net *net = nf_ct_net(ct);
528 542
529 nf_ct_ext_destroy(ct); 543 nf_ct_ext_destroy(ct);
530 atomic_dec(&net->ct.count); 544 atomic_dec(&net->ct.count);
531 call_rcu(&ct->rcu, nf_conntrack_free_rcu); 545 nf_ct_ext_free(ct);
546 kmem_cache_free(nf_conntrack_cachep, ct);
532} 547}
533EXPORT_SYMBOL_GPL(nf_conntrack_free); 548EXPORT_SYMBOL_GPL(nf_conntrack_free);
534 549
@@ -594,7 +609,7 @@ init_conntrack(struct net *net,
594 } 609 }
595 610
596 /* Overload tuple linked list to put us in unconfirmed list. */ 611 /* Overload tuple linked list to put us in unconfirmed list. */
597 hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode, 612 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
598 &net->ct.unconfirmed); 613 &net->ct.unconfirmed);
599 614
600 spin_unlock_bh(&nf_conntrack_lock); 615 spin_unlock_bh(&nf_conntrack_lock);
@@ -906,6 +921,12 @@ int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
906 return 0; 921 return 0;
907} 922}
908EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple); 923EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
924
925int nf_ct_port_nlattr_tuple_size(void)
926{
927 return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
928}
929EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
909#endif 930#endif
910 931
911/* Used by ipt_REJECT and ip6t_REJECT. */ 932/* Used by ipt_REJECT and ip6t_REJECT. */
@@ -934,17 +955,17 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
934{ 955{
935 struct nf_conntrack_tuple_hash *h; 956 struct nf_conntrack_tuple_hash *h;
936 struct nf_conn *ct; 957 struct nf_conn *ct;
937 struct hlist_node *n; 958 struct hlist_nulls_node *n;
938 959
939 spin_lock_bh(&nf_conntrack_lock); 960 spin_lock_bh(&nf_conntrack_lock);
940 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { 961 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
941 hlist_for_each_entry(h, n, &net->ct.hash[*bucket], hnode) { 962 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
942 ct = nf_ct_tuplehash_to_ctrack(h); 963 ct = nf_ct_tuplehash_to_ctrack(h);
943 if (iter(ct, data)) 964 if (iter(ct, data))
944 goto found; 965 goto found;
945 } 966 }
946 } 967 }
947 hlist_for_each_entry(h, n, &net->ct.unconfirmed, hnode) { 968 hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
948 ct = nf_ct_tuplehash_to_ctrack(h); 969 ct = nf_ct_tuplehash_to_ctrack(h);
949 if (iter(ct, data)) 970 if (iter(ct, data))
950 set_bit(IPS_DYING_BIT, &ct->status); 971 set_bit(IPS_DYING_BIT, &ct->status);
@@ -992,7 +1013,7 @@ static int kill_all(struct nf_conn *i, void *data)
992 return 1; 1013 return 1;
993} 1014}
994 1015
995void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, unsigned int size) 1016void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
996{ 1017{
997 if (vmalloced) 1018 if (vmalloced)
998 vfree(hash); 1019 vfree(hash);
@@ -1060,26 +1081,28 @@ void nf_conntrack_cleanup(struct net *net)
1060 } 1081 }
1061} 1082}
1062 1083
1063struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced) 1084void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
1064{ 1085{
1065 struct hlist_head *hash; 1086 struct hlist_nulls_head *hash;
1066 unsigned int size, i; 1087 unsigned int nr_slots, i;
1088 size_t sz;
1067 1089
1068 *vmalloced = 0; 1090 *vmalloced = 0;
1069 1091
1070 size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head)); 1092 BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
1071 hash = (void*)__get_free_pages(GFP_KERNEL|__GFP_NOWARN, 1093 nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
1072 get_order(sizeof(struct hlist_head) 1094 sz = nr_slots * sizeof(struct hlist_nulls_head);
1073 * size)); 1095 hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1096 get_order(sz));
1074 if (!hash) { 1097 if (!hash) {
1075 *vmalloced = 1; 1098 *vmalloced = 1;
1076 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); 1099 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
1077 hash = vmalloc(sizeof(struct hlist_head) * size); 1100 hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
1078 } 1101 }
1079 1102
1080 if (hash) 1103 if (hash && nulls)
1081 for (i = 0; i < size; i++) 1104 for (i = 0; i < nr_slots; i++)
1082 INIT_HLIST_HEAD(&hash[i]); 1105 INIT_HLIST_NULLS_HEAD(&hash[i], i);
1083 1106
1084 return hash; 1107 return hash;
1085} 1108}
@@ -1090,7 +1113,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1090 int i, bucket, vmalloced, old_vmalloced; 1113 int i, bucket, vmalloced, old_vmalloced;
1091 unsigned int hashsize, old_size; 1114 unsigned int hashsize, old_size;
1092 int rnd; 1115 int rnd;
1093 struct hlist_head *hash, *old_hash; 1116 struct hlist_nulls_head *hash, *old_hash;
1094 struct nf_conntrack_tuple_hash *h; 1117 struct nf_conntrack_tuple_hash *h;
1095 1118
1096 /* On boot, we can set this without any fancy locking. */ 1119 /* On boot, we can set this without any fancy locking. */
@@ -1101,7 +1124,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1101 if (!hashsize) 1124 if (!hashsize)
1102 return -EINVAL; 1125 return -EINVAL;
1103 1126
1104 hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced); 1127 hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
1105 if (!hash) 1128 if (!hash)
1106 return -ENOMEM; 1129 return -ENOMEM;
1107 1130
@@ -1116,12 +1139,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1116 */ 1139 */
1117 spin_lock_bh(&nf_conntrack_lock); 1140 spin_lock_bh(&nf_conntrack_lock);
1118 for (i = 0; i < nf_conntrack_htable_size; i++) { 1141 for (i = 0; i < nf_conntrack_htable_size; i++) {
1119 while (!hlist_empty(&init_net.ct.hash[i])) { 1142 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1120 h = hlist_entry(init_net.ct.hash[i].first, 1143 h = hlist_nulls_entry(init_net.ct.hash[i].first,
1121 struct nf_conntrack_tuple_hash, hnode); 1144 struct nf_conntrack_tuple_hash, hnnode);
1122 hlist_del_rcu(&h->hnode); 1145 hlist_nulls_del_rcu(&h->hnnode);
1123 bucket = __hash_conntrack(&h->tuple, hashsize, rnd); 1146 bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
1124 hlist_add_head(&h->hnode, &hash[bucket]); 1147 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1125 } 1148 }
1126 } 1149 }
1127 old_size = nf_conntrack_htable_size; 1150 old_size = nf_conntrack_htable_size;
@@ -1172,7 +1195,7 @@ static int nf_conntrack_init_init_net(void)
1172 1195
1173 nf_conntrack_cachep = kmem_cache_create("nf_conntrack", 1196 nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
1174 sizeof(struct nf_conn), 1197 sizeof(struct nf_conn),
1175 0, 0, NULL); 1198 0, SLAB_DESTROY_BY_RCU, NULL);
1176 if (!nf_conntrack_cachep) { 1199 if (!nf_conntrack_cachep) {
1177 printk(KERN_ERR "Unable to create nf_conn slab cache\n"); 1200 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1178 ret = -ENOMEM; 1201 ret = -ENOMEM;
@@ -1202,7 +1225,7 @@ static int nf_conntrack_init_net(struct net *net)
1202 int ret; 1225 int ret;
1203 1226
1204 atomic_set(&net->ct.count, 0); 1227 atomic_set(&net->ct.count, 0);
1205 INIT_HLIST_HEAD(&net->ct.unconfirmed); 1228 INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, 0);
1206 net->ct.stat = alloc_percpu(struct ip_conntrack_stat); 1229 net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
1207 if (!net->ct.stat) { 1230 if (!net->ct.stat) {
1208 ret = -ENOMEM; 1231 ret = -ENOMEM;
@@ -1212,7 +1235,7 @@ static int nf_conntrack_init_net(struct net *net)
1212 if (ret < 0) 1235 if (ret < 0)
1213 goto err_ecache; 1236 goto err_ecache;
1214 net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1237 net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
1215 &net->ct.hash_vmalloc); 1238 &net->ct.hash_vmalloc, 1);
1216 if (!net->ct.hash) { 1239 if (!net->ct.hash) {
1217 ret = -ENOMEM; 1240 ret = -ENOMEM;
1218 printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); 1241 printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 357ba39d4c8d..3940f996a2e4 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -604,7 +604,7 @@ int nf_conntrack_expect_init(struct net *net)
604 604
605 net->ct.expect_count = 0; 605 net->ct.expect_count = 0;
606 net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 606 net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
607 &net->ct.expect_vmalloc); 607 &net->ct.expect_vmalloc, 0);
608 if (net->ct.expect_hash == NULL) 608 if (net->ct.expect_hash == NULL)
609 goto err1; 609 goto err1;
610 610
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index a51bdac9f3a0..30b8e9009f99 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -142,6 +142,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
142 142
143 BUG_ON(me->expect_policy == NULL); 143 BUG_ON(me->expect_policy == NULL);
144 BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES); 144 BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
145 BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);
145 146
146 mutex_lock(&nf_ct_helper_mutex); 147 mutex_lock(&nf_ct_helper_mutex);
147 hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]); 148 hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
@@ -158,6 +159,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
158 struct nf_conntrack_tuple_hash *h; 159 struct nf_conntrack_tuple_hash *h;
159 struct nf_conntrack_expect *exp; 160 struct nf_conntrack_expect *exp;
160 const struct hlist_node *n, *next; 161 const struct hlist_node *n, *next;
162 const struct hlist_nulls_node *nn;
161 unsigned int i; 163 unsigned int i;
162 164
163 /* Get rid of expectations */ 165 /* Get rid of expectations */
@@ -174,10 +176,10 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
174 } 176 }
175 177
176 /* Get rid of expecteds, set helpers to NULL. */ 178 /* Get rid of expecteds, set helpers to NULL. */
177 hlist_for_each_entry(h, n, &net->ct.unconfirmed, hnode) 179 hlist_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
178 unhelp(h, me); 180 unhelp(h, me);
179 for (i = 0; i < nf_conntrack_htable_size; i++) { 181 for (i = 0; i < nf_conntrack_htable_size; i++) {
180 hlist_for_each_entry(h, n, &net->ct.hash[i], hnode) 182 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
181 unhelp(h, me); 183 unhelp(h, me);
182 } 184 }
183} 185}
@@ -217,7 +219,7 @@ int nf_conntrack_helper_init(void)
217 219
218 nf_ct_helper_hsize = 1; /* gets rounded up to use one page */ 220 nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
219 nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 221 nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize,
220 &nf_ct_helper_vmalloc); 222 &nf_ct_helper_vmalloc, 0);
221 if (!nf_ct_helper_hash) 223 if (!nf_ct_helper_hash)
222 return -ENOMEM; 224 return -ENOMEM;
223 225
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 7a16bd462f82..c6439c77953c 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/rculist.h> 21#include <linux/rculist.h>
22#include <linux/rculist_nulls.h>
22#include <linux/types.h> 23#include <linux/types.h>
23#include <linux/timer.h> 24#include <linux/timer.h>
24#include <linux/skbuff.h> 25#include <linux/skbuff.h>
@@ -404,6 +405,78 @@ nla_put_failure:
404} 405}
405 406
406#ifdef CONFIG_NF_CONNTRACK_EVENTS 407#ifdef CONFIG_NF_CONNTRACK_EVENTS
408/*
409 * The general structure of a ctnetlink event is
410 *
411 * CTA_TUPLE_ORIG
412 * <l3/l4-proto-attributes>
413 * CTA_TUPLE_REPLY
414 * <l3/l4-proto-attributes>
415 * CTA_ID
416 * ...
417 * CTA_PROTOINFO
418 * <l4-proto-attributes>
419 * CTA_TUPLE_MASTER
420 * <l3/l4-proto-attributes>
421 *
422 * Therefore the formular is
423 *
424 * size = sizeof(headers) + sizeof(generic_nlas) + 3 * sizeof(tuple_nlas)
425 * + sizeof(protoinfo_nlas)
426 */
427static struct sk_buff *
428ctnetlink_alloc_skb(const struct nf_conntrack_tuple *tuple, gfp_t gfp)
429{
430 struct nf_conntrack_l3proto *l3proto;
431 struct nf_conntrack_l4proto *l4proto;
432 int len;
433
434#define NLA_TYPE_SIZE(type) nla_total_size(sizeof(type))
435
436 /* proto independant part */
437 len = NLMSG_SPACE(sizeof(struct nfgenmsg))
438 + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
439 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
440 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
441 + 3 * NLA_TYPE_SIZE(u_int8_t) /* CTA_PROTO_NUM */
442 + NLA_TYPE_SIZE(u_int32_t) /* CTA_ID */
443 + NLA_TYPE_SIZE(u_int32_t) /* CTA_STATUS */
444#ifdef CONFIG_NF_CT_ACCT
445 + 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
446 + 2 * NLA_TYPE_SIZE(uint64_t) /* CTA_COUNTERS_PACKETS */
447 + 2 * NLA_TYPE_SIZE(uint64_t) /* CTA_COUNTERS_BYTES */
448#endif
449 + NLA_TYPE_SIZE(u_int32_t) /* CTA_TIMEOUT */
450 + nla_total_size(0) /* CTA_PROTOINFO */
451 + nla_total_size(0) /* CTA_HELP */
452 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
453#ifdef CONFIG_NF_CONNTRACK_SECMARK
454 + NLA_TYPE_SIZE(u_int32_t) /* CTA_SECMARK */
455#endif
456#ifdef CONFIG_NF_NAT_NEEDED
457 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
458 + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_POS */
459 + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_BEFORE */
460 + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_AFTER */
461#endif
462#ifdef CONFIG_NF_CONNTRACK_MARK
463 + NLA_TYPE_SIZE(u_int32_t) /* CTA_MARK */
464#endif
465 ;
466
467#undef NLA_TYPE_SIZE
468
469 rcu_read_lock();
470 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
471 len += l3proto->nla_size;
472
473 l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
474 len += l4proto->nla_size;
475 rcu_read_unlock();
476
477 return alloc_skb(len, gfp);
478}
479
407static int ctnetlink_conntrack_event(struct notifier_block *this, 480static int ctnetlink_conntrack_event(struct notifier_block *this,
408 unsigned long events, void *ptr) 481 unsigned long events, void *ptr)
409{ 482{
@@ -437,7 +510,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
437 if (!item->report && !nfnetlink_has_listeners(group)) 510 if (!item->report && !nfnetlink_has_listeners(group))
438 return NOTIFY_DONE; 511 return NOTIFY_DONE;
439 512
440 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); 513 skb = ctnetlink_alloc_skb(tuple(ct, IP_CT_DIR_ORIGINAL), GFP_ATOMIC);
441 if (!skb) 514 if (!skb)
442 return NOTIFY_DONE; 515 return NOTIFY_DONE;
443 516
@@ -536,7 +609,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
536{ 609{
537 struct nf_conn *ct, *last; 610 struct nf_conn *ct, *last;
538 struct nf_conntrack_tuple_hash *h; 611 struct nf_conntrack_tuple_hash *h;
539 struct hlist_node *n; 612 struct hlist_nulls_node *n;
540 struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh); 613 struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh);
541 u_int8_t l3proto = nfmsg->nfgen_family; 614 u_int8_t l3proto = nfmsg->nfgen_family;
542 615
@@ -544,27 +617,27 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
544 last = (struct nf_conn *)cb->args[1]; 617 last = (struct nf_conn *)cb->args[1];
545 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { 618 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
546restart: 619restart:
547 hlist_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], 620 hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
548 hnode) { 621 hnnode) {
549 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) 622 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
550 continue; 623 continue;
551 ct = nf_ct_tuplehash_to_ctrack(h); 624 ct = nf_ct_tuplehash_to_ctrack(h);
625 if (!atomic_inc_not_zero(&ct->ct_general.use))
626 continue;
552 /* Dump entries of a given L3 protocol number. 627 /* Dump entries of a given L3 protocol number.
553 * If it is not specified, ie. l3proto == 0, 628 * If it is not specified, ie. l3proto == 0,
554 * then dump everything. */ 629 * then dump everything. */
555 if (l3proto && nf_ct_l3num(ct) != l3proto) 630 if (l3proto && nf_ct_l3num(ct) != l3proto)
556 continue; 631 goto releasect;
557 if (cb->args[1]) { 632 if (cb->args[1]) {
558 if (ct != last) 633 if (ct != last)
559 continue; 634 goto releasect;
560 cb->args[1] = 0; 635 cb->args[1] = 0;
561 } 636 }
562 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, 637 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
563 cb->nlh->nlmsg_seq, 638 cb->nlh->nlmsg_seq,
564 IPCTNL_MSG_CT_NEW, 639 IPCTNL_MSG_CT_NEW,
565 1, ct) < 0) { 640 1, ct) < 0) {
566 if (!atomic_inc_not_zero(&ct->ct_general.use))
567 continue;
568 cb->args[1] = (unsigned long)ct; 641 cb->args[1] = (unsigned long)ct;
569 goto out; 642 goto out;
570 } 643 }
@@ -577,6 +650,8 @@ restart:
577 if (acct) 650 if (acct)
578 memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX])); 651 memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
579 } 652 }
653releasect:
654 nf_ct_put(ct);
580 } 655 }
581 if (cb->args[1]) { 656 if (cb->args[1]) {
582 cb->args[1] = 0; 657 cb->args[1] = 0;
@@ -1242,13 +1317,12 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1242 if (err < 0) 1317 if (err < 0)
1243 goto err2; 1318 goto err2;
1244 1319
1245 master_h = __nf_conntrack_find(&init_net, &master); 1320 master_h = nf_conntrack_find_get(&init_net, &master);
1246 if (master_h == NULL) { 1321 if (master_h == NULL) {
1247 err = -ENOENT; 1322 err = -ENOENT;
1248 goto err2; 1323 goto err2;
1249 } 1324 }
1250 master_ct = nf_ct_tuplehash_to_ctrack(master_h); 1325 master_ct = nf_ct_tuplehash_to_ctrack(master_h);
1251 nf_conntrack_get(&master_ct->ct_general);
1252 __set_bit(IPS_EXPECTED_BIT, &ct->status); 1326 __set_bit(IPS_EXPECTED_BIT, &ct->status);
1253 ct->master = master_ct; 1327 ct->master = master_ct;
1254 } 1328 }
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 9a62b4efa0e1..1a4568bf7ea5 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -167,6 +167,9 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
167 if (proto->l3proto >= AF_MAX) 167 if (proto->l3proto >= AF_MAX)
168 return -EBUSY; 168 return -EBUSY;
169 169
170 if (proto->tuple_to_nlattr && !proto->nlattr_tuple_size)
171 return -EINVAL;
172
170 mutex_lock(&nf_ct_proto_mutex); 173 mutex_lock(&nf_ct_proto_mutex);
171 if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_l3proto_generic) { 174 if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_l3proto_generic) {
172 ret = -EBUSY; 175 ret = -EBUSY;
@@ -177,6 +180,9 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
177 if (ret < 0) 180 if (ret < 0)
178 goto out_unlock; 181 goto out_unlock;
179 182
183 if (proto->nlattr_tuple_size)
184 proto->nla_size = 3 * proto->nlattr_tuple_size();
185
180 rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], proto); 186 rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], proto);
181 187
182out_unlock: 188out_unlock:
@@ -263,6 +269,10 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
263 if (l4proto->l3proto >= PF_MAX) 269 if (l4proto->l3proto >= PF_MAX)
264 return -EBUSY; 270 return -EBUSY;
265 271
272 if ((l4proto->to_nlattr && !l4proto->nlattr_size)
273 || (l4proto->tuple_to_nlattr && !l4proto->nlattr_tuple_size))
274 return -EINVAL;
275
266 mutex_lock(&nf_ct_proto_mutex); 276 mutex_lock(&nf_ct_proto_mutex);
267 if (!nf_ct_protos[l4proto->l3proto]) { 277 if (!nf_ct_protos[l4proto->l3proto]) {
268 /* l3proto may be loaded latter. */ 278 /* l3proto may be loaded latter. */
@@ -290,6 +300,12 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
290 if (ret < 0) 300 if (ret < 0)
291 goto out_unlock; 301 goto out_unlock;
292 302
303 l4proto->nla_size = 0;
304 if (l4proto->nlattr_size)
305 l4proto->nla_size += l4proto->nlattr_size();
306 if (l4proto->nlattr_tuple_size)
307 l4proto->nla_size += 3 * l4proto->nlattr_tuple_size();
308
293 rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], 309 rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
294 l4proto); 310 l4proto);
295 311
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index d3d5a7fd73ce..50dac8dbe7d8 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -669,6 +669,12 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
669 write_unlock_bh(&dccp_lock); 669 write_unlock_bh(&dccp_lock);
670 return 0; 670 return 0;
671} 671}
672
673static int dccp_nlattr_size(void)
674{
675 return nla_total_size(0) /* CTA_PROTOINFO_DCCP */
676 + nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1);
677}
672#endif 678#endif
673 679
674#ifdef CONFIG_SYSCTL 680#ifdef CONFIG_SYSCTL
@@ -749,8 +755,10 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
749 .print_conntrack = dccp_print_conntrack, 755 .print_conntrack = dccp_print_conntrack,
750#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 756#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
751 .to_nlattr = dccp_to_nlattr, 757 .to_nlattr = dccp_to_nlattr,
758 .nlattr_size = dccp_nlattr_size,
752 .from_nlattr = nlattr_to_dccp, 759 .from_nlattr = nlattr_to_dccp,
753 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 760 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
761 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
754 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 762 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
755 .nla_policy = nf_ct_port_nla_policy, 763 .nla_policy = nf_ct_port_nla_policy,
756#endif 764#endif
@@ -771,6 +779,7 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
771 .to_nlattr = dccp_to_nlattr, 779 .to_nlattr = dccp_to_nlattr,
772 .from_nlattr = nlattr_to_dccp, 780 .from_nlattr = nlattr_to_dccp,
773 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 781 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
782 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
774 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 783 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
775 .nla_policy = nf_ct_port_nla_policy, 784 .nla_policy = nf_ct_port_nla_policy,
776#endif 785#endif
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 1b279f9d6bf3..117b80112fcb 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -293,6 +293,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
293 .me = THIS_MODULE, 293 .me = THIS_MODULE,
294#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 294#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
295 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 295 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
296 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
296 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 297 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
297 .nla_policy = nf_ct_port_nla_policy, 298 .nla_policy = nf_ct_port_nla_policy,
298#endif 299#endif
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 74e037901199..101b4ad9e817 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -537,6 +537,12 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
537 537
538 return 0; 538 return 0;
539} 539}
540
541static int sctp_nlattr_size(void)
542{
543 return nla_total_size(0) /* CTA_PROTOINFO_SCTP */
544 + nla_policy_len(sctp_nla_policy, CTA_PROTOINFO_SCTP_MAX + 1);
545}
540#endif 546#endif
541 547
542#ifdef CONFIG_SYSCTL 548#ifdef CONFIG_SYSCTL
@@ -668,8 +674,10 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
668 .me = THIS_MODULE, 674 .me = THIS_MODULE,
669#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 675#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
670 .to_nlattr = sctp_to_nlattr, 676 .to_nlattr = sctp_to_nlattr,
677 .nlattr_size = sctp_nlattr_size,
671 .from_nlattr = nlattr_to_sctp, 678 .from_nlattr = nlattr_to_sctp,
672 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 679 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
680 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
673 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 681 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
674 .nla_policy = nf_ct_port_nla_policy, 682 .nla_policy = nf_ct_port_nla_policy,
675#endif 683#endif
@@ -696,8 +704,10 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
696 .me = THIS_MODULE, 704 .me = THIS_MODULE,
697#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 705#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
698 .to_nlattr = sctp_to_nlattr, 706 .to_nlattr = sctp_to_nlattr,
707 .nlattr_size = sctp_nlattr_size,
699 .from_nlattr = nlattr_to_sctp, 708 .from_nlattr = nlattr_to_sctp,
700 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 709 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
710 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
701 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 711 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
702 .nla_policy = nf_ct_port_nla_policy, 712 .nla_policy = nf_ct_port_nla_policy,
703#endif 713#endif
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 0aeb8b09a1f7..b5ccf2b4b2e7 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1184,6 +1184,17 @@ static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
1184 1184
1185 return 0; 1185 return 0;
1186} 1186}
1187
1188static int tcp_nlattr_size(void)
1189{
1190 return nla_total_size(0) /* CTA_PROTOINFO_TCP */
1191 + nla_policy_len(tcp_nla_policy, CTA_PROTOINFO_TCP_MAX + 1);
1192}
1193
1194static int tcp_nlattr_tuple_size(void)
1195{
1196 return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1197}
1187#endif 1198#endif
1188 1199
1189#ifdef CONFIG_SYSCTL 1200#ifdef CONFIG_SYSCTL
@@ -1399,9 +1410,11 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
1399 .error = tcp_error, 1410 .error = tcp_error,
1400#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 1411#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
1401 .to_nlattr = tcp_to_nlattr, 1412 .to_nlattr = tcp_to_nlattr,
1413 .nlattr_size = tcp_nlattr_size,
1402 .from_nlattr = nlattr_to_tcp, 1414 .from_nlattr = nlattr_to_tcp,
1403 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 1415 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
1404 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 1416 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
1417 .nlattr_tuple_size = tcp_nlattr_tuple_size,
1405 .nla_policy = nf_ct_port_nla_policy, 1418 .nla_policy = nf_ct_port_nla_policy,
1406#endif 1419#endif
1407#ifdef CONFIG_SYSCTL 1420#ifdef CONFIG_SYSCTL
@@ -1429,9 +1442,11 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
1429 .error = tcp_error, 1442 .error = tcp_error,
1430#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 1443#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
1431 .to_nlattr = tcp_to_nlattr, 1444 .to_nlattr = tcp_to_nlattr,
1445 .nlattr_size = tcp_nlattr_size,
1432 .from_nlattr = nlattr_to_tcp, 1446 .from_nlattr = nlattr_to_tcp,
1433 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 1447 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
1434 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 1448 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
1449 .nlattr_tuple_size = tcp_nlattr_tuple_size,
1435 .nla_policy = nf_ct_port_nla_policy, 1450 .nla_policy = nf_ct_port_nla_policy,
1436#endif 1451#endif
1437#ifdef CONFIG_SYSCTL 1452#ifdef CONFIG_SYSCTL
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index d4021179e24e..70809d117b91 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -195,6 +195,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
195#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 195#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
196 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 196 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
197 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 197 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
198 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
198 .nla_policy = nf_ct_port_nla_policy, 199 .nla_policy = nf_ct_port_nla_policy,
199#endif 200#endif
200#ifdef CONFIG_SYSCTL 201#ifdef CONFIG_SYSCTL
@@ -222,6 +223,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
222#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 223#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
223 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 224 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
224 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 225 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
226 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
225 .nla_policy = nf_ct_port_nla_policy, 227 .nla_policy = nf_ct_port_nla_policy,
226#endif 228#endif
227#ifdef CONFIG_SYSCTL 229#ifdef CONFIG_SYSCTL
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 4579d8de13b1..4614696c1b88 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -180,6 +180,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
180 .error = udplite_error, 180 .error = udplite_error,
181#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 181#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
182 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 182 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
183 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
183 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 184 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
184 .nla_policy = nf_ct_port_nla_policy, 185 .nla_policy = nf_ct_port_nla_policy,
185#endif 186#endif
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 4da54b0b9233..193515381970 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -44,40 +44,42 @@ struct ct_iter_state {
44 unsigned int bucket; 44 unsigned int bucket;
45}; 45};
46 46
47static struct hlist_node *ct_get_first(struct seq_file *seq) 47static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
48{ 48{
49 struct net *net = seq_file_net(seq); 49 struct net *net = seq_file_net(seq);
50 struct ct_iter_state *st = seq->private; 50 struct ct_iter_state *st = seq->private;
51 struct hlist_node *n; 51 struct hlist_nulls_node *n;
52 52
53 for (st->bucket = 0; 53 for (st->bucket = 0;
54 st->bucket < nf_conntrack_htable_size; 54 st->bucket < nf_conntrack_htable_size;
55 st->bucket++) { 55 st->bucket++) {
56 n = rcu_dereference(net->ct.hash[st->bucket].first); 56 n = rcu_dereference(net->ct.hash[st->bucket].first);
57 if (n) 57 if (!is_a_nulls(n))
58 return n; 58 return n;
59 } 59 }
60 return NULL; 60 return NULL;
61} 61}
62 62
63static struct hlist_node *ct_get_next(struct seq_file *seq, 63static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
64 struct hlist_node *head) 64 struct hlist_nulls_node *head)
65{ 65{
66 struct net *net = seq_file_net(seq); 66 struct net *net = seq_file_net(seq);
67 struct ct_iter_state *st = seq->private; 67 struct ct_iter_state *st = seq->private;
68 68
69 head = rcu_dereference(head->next); 69 head = rcu_dereference(head->next);
70 while (head == NULL) { 70 while (is_a_nulls(head)) {
71 if (++st->bucket >= nf_conntrack_htable_size) 71 if (likely(get_nulls_value(head) == st->bucket)) {
72 return NULL; 72 if (++st->bucket >= nf_conntrack_htable_size)
73 return NULL;
74 }
73 head = rcu_dereference(net->ct.hash[st->bucket].first); 75 head = rcu_dereference(net->ct.hash[st->bucket].first);
74 } 76 }
75 return head; 77 return head;
76} 78}
77 79
78static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos) 80static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
79{ 81{
80 struct hlist_node *head = ct_get_first(seq); 82 struct hlist_nulls_node *head = ct_get_first(seq);
81 83
82 if (head) 84 if (head)
83 while (pos && (head = ct_get_next(seq, head))) 85 while (pos && (head = ct_get_next(seq, head)))
@@ -107,67 +109,74 @@ static void ct_seq_stop(struct seq_file *s, void *v)
107/* return 0 on success, 1 in case of error */ 109/* return 0 on success, 1 in case of error */
108static int ct_seq_show(struct seq_file *s, void *v) 110static int ct_seq_show(struct seq_file *s, void *v)
109{ 111{
110 const struct nf_conntrack_tuple_hash *hash = v; 112 struct nf_conntrack_tuple_hash *hash = v;
111 const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash); 113 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
112 const struct nf_conntrack_l3proto *l3proto; 114 const struct nf_conntrack_l3proto *l3proto;
113 const struct nf_conntrack_l4proto *l4proto; 115 const struct nf_conntrack_l4proto *l4proto;
116 int ret = 0;
114 117
115 NF_CT_ASSERT(ct); 118 NF_CT_ASSERT(ct);
119 if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
120 return 0;
116 121
117 /* we only want to print DIR_ORIGINAL */ 122 /* we only want to print DIR_ORIGINAL */
118 if (NF_CT_DIRECTION(hash)) 123 if (NF_CT_DIRECTION(hash))
119 return 0; 124 goto release;
120 125
121 l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct)); 126 l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
122 NF_CT_ASSERT(l3proto); 127 NF_CT_ASSERT(l3proto);
123 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); 128 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
124 NF_CT_ASSERT(l4proto); 129 NF_CT_ASSERT(l4proto);
125 130
131 ret = -ENOSPC;
126 if (seq_printf(s, "%-8s %u %-8s %u %ld ", 132 if (seq_printf(s, "%-8s %u %-8s %u %ld ",
127 l3proto->name, nf_ct_l3num(ct), 133 l3proto->name, nf_ct_l3num(ct),
128 l4proto->name, nf_ct_protonum(ct), 134 l4proto->name, nf_ct_protonum(ct),
129 timer_pending(&ct->timeout) 135 timer_pending(&ct->timeout)
130 ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0) 136 ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
131 return -ENOSPC; 137 goto release;
132 138
133 if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct)) 139 if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
134 return -ENOSPC; 140 goto release;
135 141
136 if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 142 if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
137 l3proto, l4proto)) 143 l3proto, l4proto))
138 return -ENOSPC; 144 goto release;
139 145
140 if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL)) 146 if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
141 return -ENOSPC; 147 goto release;
142 148
143 if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) 149 if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
144 if (seq_printf(s, "[UNREPLIED] ")) 150 if (seq_printf(s, "[UNREPLIED] "))
145 return -ENOSPC; 151 goto release;
146 152
147 if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, 153 if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
148 l3proto, l4proto)) 154 l3proto, l4proto))
149 return -ENOSPC; 155 goto release;
150 156
151 if (seq_print_acct(s, ct, IP_CT_DIR_REPLY)) 157 if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
152 return -ENOSPC; 158 goto release;
153 159
154 if (test_bit(IPS_ASSURED_BIT, &ct->status)) 160 if (test_bit(IPS_ASSURED_BIT, &ct->status))
155 if (seq_printf(s, "[ASSURED] ")) 161 if (seq_printf(s, "[ASSURED] "))
156 return -ENOSPC; 162 goto release;
157 163
158#if defined(CONFIG_NF_CONNTRACK_MARK) 164#if defined(CONFIG_NF_CONNTRACK_MARK)
159 if (seq_printf(s, "mark=%u ", ct->mark)) 165 if (seq_printf(s, "mark=%u ", ct->mark))
160 return -ENOSPC; 166 goto release;
161#endif 167#endif
162 168
163#ifdef CONFIG_NF_CONNTRACK_SECMARK 169#ifdef CONFIG_NF_CONNTRACK_SECMARK
164 if (seq_printf(s, "secmark=%u ", ct->secmark)) 170 if (seq_printf(s, "secmark=%u ", ct->secmark))
165 return -ENOSPC; 171 goto release;
166#endif 172#endif
167 173
168 if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) 174 if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
169 return -ENOSPC; 175 goto release;
170 176
177 ret = 0;
178release:
179 nf_ct_put(ct);
171 return 0; 180 return 0;
172} 181}
173 182
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 7f404cc64c83..680980954395 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -108,7 +108,7 @@ static int count_them(struct xt_connlimit_data *data,
108 const struct nf_conntrack_tuple_hash *found; 108 const struct nf_conntrack_tuple_hash *found;
109 struct xt_connlimit_conn *conn; 109 struct xt_connlimit_conn *conn;
110 struct xt_connlimit_conn *tmp; 110 struct xt_connlimit_conn *tmp;
111 const struct nf_conn *found_ct; 111 struct nf_conn *found_ct;
112 struct list_head *hash; 112 struct list_head *hash;
113 bool addit = true; 113 bool addit = true;
114 int matches = 0; 114 int matches = 0;
@@ -123,7 +123,7 @@ static int count_them(struct xt_connlimit_data *data,
123 123
124 /* check the saved connections */ 124 /* check the saved connections */
125 list_for_each_entry_safe(conn, tmp, hash, list) { 125 list_for_each_entry_safe(conn, tmp, hash, list) {
126 found = __nf_conntrack_find(&init_net, &conn->tuple); 126 found = nf_conntrack_find_get(&init_net, &conn->tuple);
127 found_ct = NULL; 127 found_ct = NULL;
128 128
129 if (found != NULL) 129 if (found != NULL)
@@ -151,6 +151,7 @@ static int count_them(struct xt_connlimit_data *data,
151 * we do not care about connections which are 151 * we do not care about connections which are
152 * closed already -> ditch it 152 * closed already -> ditch it
153 */ 153 */
154 nf_ct_put(found_ct);
154 list_del(&conn->list); 155 list_del(&conn->list);
155 kfree(conn); 156 kfree(conn);
156 continue; 157 continue;
@@ -160,6 +161,7 @@ static int count_them(struct xt_connlimit_data *data,
160 match->family)) 161 match->family))
161 /* same source network -> be counted! */ 162 /* same source network -> be counted! */
162 ++matches; 163 ++matches;
164 nf_ct_put(found_ct);
163 } 165 }
164 166
165 rcu_read_unlock(); 167 rcu_read_unlock();
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index 44a234ef4439..8d28ca5848bc 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -20,23 +20,6 @@ MODULE_DESCRIPTION("Xtables: Bridge physical device match");
20MODULE_ALIAS("ipt_physdev"); 20MODULE_ALIAS("ipt_physdev");
21MODULE_ALIAS("ip6t_physdev"); 21MODULE_ALIAS("ip6t_physdev");
22 22
23static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
24{
25 const unsigned long *a = (const unsigned long *)_a;
26 const unsigned long *b = (const unsigned long *)_b;
27 const unsigned long *mask = (const unsigned long *)_mask;
28 unsigned long ret;
29
30 ret = (a[0] ^ b[0]) & mask[0];
31 if (IFNAMSIZ > sizeof(unsigned long))
32 ret |= (a[1] ^ b[1]) & mask[1];
33 if (IFNAMSIZ > 2 * sizeof(unsigned long))
34 ret |= (a[2] ^ b[2]) & mask[2];
35 if (IFNAMSIZ > 3 * sizeof(unsigned long))
36 ret |= (a[3] ^ b[3]) & mask[3];
37 BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
38 return ret;
39}
40 23
41static bool 24static bool
42physdev_mt(const struct sk_buff *skb, const struct xt_match_param *par) 25physdev_mt(const struct sk_buff *skb, const struct xt_match_param *par)
@@ -85,7 +68,7 @@ physdev_mt(const struct sk_buff *skb, const struct xt_match_param *par)
85 if (!(info->bitmask & XT_PHYSDEV_OP_IN)) 68 if (!(info->bitmask & XT_PHYSDEV_OP_IN))
86 goto match_outdev; 69 goto match_outdev;
87 indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname; 70 indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname;
88 ret = ifname_compare(indev, info->physindev, info->in_mask); 71 ret = ifname_compare_aligned(indev, info->physindev, info->in_mask);
89 72
90 if (!ret ^ !(info->invert & XT_PHYSDEV_OP_IN)) 73 if (!ret ^ !(info->invert & XT_PHYSDEV_OP_IN))
91 return false; 74 return false;
@@ -95,7 +78,7 @@ match_outdev:
95 return true; 78 return true;
96 outdev = nf_bridge->physoutdev ? 79 outdev = nf_bridge->physoutdev ?
97 nf_bridge->physoutdev->name : nulldevname; 80 nf_bridge->physoutdev->name : nulldevname;
98 ret = ifname_compare(outdev, info->physoutdev, info->out_mask); 81 ret = ifname_compare_aligned(outdev, info->physoutdev, info->out_mask);
99 82
100 return (!!ret ^ !(info->invert & XT_PHYSDEV_OP_OUT)); 83 return (!!ret ^ !(info->invert & XT_PHYSDEV_OP_OUT));
101} 84}
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 6d9c58ec56ac..4e705f87969f 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1037,10 +1037,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1037 unsigned char *asmptr; 1037 unsigned char *asmptr;
1038 int size; 1038 int size;
1039 1039
1040 /* Netrom empty data frame has no meaning : don't send */
1041 if (len == 0)
1042 return 0;
1043
1044 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) 1040 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
1045 return -EINVAL; 1041 return -EINVAL;
1046 1042
@@ -1086,7 +1082,11 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1086 1082
1087 SOCK_DEBUG(sk, "NET/ROM: sendto: Addresses built.\n"); 1083 SOCK_DEBUG(sk, "NET/ROM: sendto: Addresses built.\n");
1088 1084
1089 /* Build a packet */ 1085 /* Build a packet - the conventional user limit is 236 bytes. We can
1086 do ludicrously large NetROM frames but must not overflow */
1087 if (len > 65536)
1088 return -EMSGSIZE;
1089
1090 SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n"); 1090 SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
1091 size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN; 1091 size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
1092 1092
@@ -1171,11 +1171,6 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
1171 skb_reset_transport_header(skb); 1171 skb_reset_transport_header(skb);
1172 copied = skb->len; 1172 copied = skb->len;
1173 1173
1174 /* NetRom empty data frame has no meaning : ignore it */
1175 if (copied == 0) {
1176 goto out;
1177 }
1178
1179 if (copied > size) { 1174 if (copied > size) {
1180 copied = size; 1175 copied = size;
1181 msg->msg_flags |= MSG_TRUNC; 1176 msg->msg_flags |= MSG_TRUNC;
@@ -1191,7 +1186,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
1191 1186
1192 msg->msg_namelen = sizeof(*sax); 1187 msg->msg_namelen = sizeof(*sax);
1193 1188
1194out: skb_free_datagram(sk, skb); 1189 skb_free_datagram(sk, skb);
1195 1190
1196 release_sock(sk); 1191 release_sock(sk);
1197 return copied; 1192 return copied;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 650139626581..0f36e8d59b29 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1124,6 +1124,10 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1124 1124
1125 /* Build a packet */ 1125 /* Build a packet */
1126 SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n"); 1126 SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
1127 /* Sanity check the packet size */
1128 if (len > 65535)
1129 return -EMSGSIZE;
1130
1127 size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN; 1131 size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
1128 1132
1129 if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) 1133 if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
diff --git a/net/socket.c b/net/socket.c
index af0205ff56f2..0b14b79c03af 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -328,7 +328,7 @@ static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
328 dentry->d_inode->i_ino); 328 dentry->d_inode->i_ino);
329} 329}
330 330
331static struct dentry_operations sockfs_dentry_operations = { 331static const struct dentry_operations sockfs_dentry_operations = {
332 .d_delete = sockfs_delete_dentry, 332 .d_delete = sockfs_delete_dentry,
333 .d_dname = sockfs_dname, 333 .d_dname = sockfs_dname,
334}; 334};
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 577385a4a5dc..9ced0628d69c 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -480,7 +480,7 @@ static int rpc_delete_dentry(struct dentry *dentry)
480 return 1; 480 return 1;
481} 481}
482 482
483static struct dentry_operations rpc_dentry_operations = { 483static const struct dentry_operations rpc_dentry_operations = {
484 .d_delete = rpc_delete_dentry, 484 .d_delete = rpc_delete_dentry,
485}; 485};
486 486
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 092ae6faccca..3c3bc9e579ed 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -10,51 +10,19 @@ config CFG80211_REG_DEBUG
10 10
11 If unsure, say N. 11 If unsure, say N.
12 12
13config NL80211
14 bool "nl80211 new netlink interface support"
15 depends on CFG80211
16 default y
17 ---help---
18 This option turns on the new netlink interface
19 (nl80211) support in cfg80211.
20
21 If =n, drivers using mac80211 will be configured via
22 wireless extension support provided by that subsystem.
23
24 If unsure, say Y.
25
26config WIRELESS_OLD_REGULATORY 13config WIRELESS_OLD_REGULATORY
27 bool "Old wireless static regulatory definitions" 14 bool "Old wireless static regulatory definitions"
28 default y 15 default n
29 ---help--- 16 ---help---
30 This option enables the old static regulatory information 17 This option enables the old static regulatory information
31 and uses it within the new framework. This is available 18 and uses it within the new framework. This option is available
32 temporarily as an option to help prevent immediate issues 19 for historical reasons and it is advised to leave it off.
33 due to the switch to the new regulatory framework which 20
34 does require a new userspace application which has the 21 For details see:
35 database of regulatory information (CRDA) and another for 22
36 setting regulatory domains (iw). 23 http://wireless.kernel.org/en/developers/Regulatory
37 24
38 For more information see: 25 Say N and if you say Y, please tell us why. The default is N.
39
40 http://wireless.kernel.org/en/developers/Regulatory/CRDA
41 http://wireless.kernel.org/en/users/Documentation/iw
42
43 It is important to note though that if you *do* have CRDA present
44 and if this option is enabled CRDA *will* be called to update the
45 regulatory domain (for US and JP only). Support for letting the user
46 set the regulatory domain through iw is also supported. This option
47 mainly exists to leave around for a kernel release some old static
48 regulatory domains that were defined and to keep around the old
49 ieee80211_regdom module parameter. This is being phased out and you
50 should stop using them ASAP.
51
52 Note: You will need CRDA if you want 802.11d support
53
54 Say Y unless you have installed a new userspace application.
55 Also say Y if have one currently depending on the ieee80211_regdom
56 module parameter and cannot port it to use the new userspace
57 interfaces.
58 26
59config WIRELESS_EXT 27config WIRELESS_EXT
60 bool "Wireless extensions" 28 bool "Wireless extensions"
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index dad43c24f695..6d1e7b27b752 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -5,8 +5,7 @@ obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o
5obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o 5obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o
6obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o 6obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o
7 7
8cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o 8cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o mlme.o
9cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o 9cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o
10cfg80211-$(CONFIG_NL80211) += nl80211.o
11 10
12ccflags-y += -D__CHECK_ENDIAN__ 11ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 17fe39049740..d1f556535f6d 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -87,7 +87,7 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx)
87} 87}
88 88
89/* requires cfg80211_mutex to be held! */ 89/* requires cfg80211_mutex to be held! */
90static struct cfg80211_registered_device * 90struct cfg80211_registered_device *
91__cfg80211_drv_from_info(struct genl_info *info) 91__cfg80211_drv_from_info(struct genl_info *info)
92{ 92{
93 int ifindex; 93 int ifindex;
@@ -176,13 +176,14 @@ void cfg80211_put_dev(struct cfg80211_registered_device *drv)
176 mutex_unlock(&drv->mtx); 176 mutex_unlock(&drv->mtx);
177} 177}
178 178
179/* requires cfg80211_mutex to be held */
179int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, 180int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
180 char *newname) 181 char *newname)
181{ 182{
182 struct cfg80211_registered_device *drv; 183 struct cfg80211_registered_device *drv;
183 int wiphy_idx, taken = -1, result, digits; 184 int wiphy_idx, taken = -1, result, digits;
184 185
185 mutex_lock(&cfg80211_mutex); 186 assert_cfg80211_lock();
186 187
187 /* prohibit calling the thing phy%d when %d is not its number */ 188 /* prohibit calling the thing phy%d when %d is not its number */
188 sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken); 189 sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken);
@@ -195,30 +196,23 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
195 * deny the name if it is phy<idx> where <idx> is printed 196 * deny the name if it is phy<idx> where <idx> is printed
196 * without leading zeroes. taken == strlen(newname) here 197 * without leading zeroes. taken == strlen(newname) here
197 */ 198 */
198 result = -EINVAL;
199 if (taken == strlen(PHY_NAME) + digits) 199 if (taken == strlen(PHY_NAME) + digits)
200 goto out_unlock; 200 return -EINVAL;
201 } 201 }
202 202
203 203
204 /* Ignore nop renames */ 204 /* Ignore nop renames */
205 result = 0;
206 if (strcmp(newname, dev_name(&rdev->wiphy.dev)) == 0) 205 if (strcmp(newname, dev_name(&rdev->wiphy.dev)) == 0)
207 goto out_unlock; 206 return 0;
208 207
209 /* Ensure another device does not already have this name. */ 208 /* Ensure another device does not already have this name. */
210 list_for_each_entry(drv, &cfg80211_drv_list, list) { 209 list_for_each_entry(drv, &cfg80211_drv_list, list)
211 result = -EINVAL;
212 if (strcmp(newname, dev_name(&drv->wiphy.dev)) == 0) 210 if (strcmp(newname, dev_name(&drv->wiphy.dev)) == 0)
213 goto out_unlock; 211 return -EINVAL;
214 }
215 212
216 /* this will only check for collisions in sysfs
217 * which is not even always compiled in.
218 */
219 result = device_rename(&rdev->wiphy.dev, newname); 213 result = device_rename(&rdev->wiphy.dev, newname);
220 if (result) 214 if (result)
221 goto out_unlock; 215 return result;
222 216
223 if (rdev->wiphy.debugfsdir && 217 if (rdev->wiphy.debugfsdir &&
224 !debugfs_rename(rdev->wiphy.debugfsdir->d_parent, 218 !debugfs_rename(rdev->wiphy.debugfsdir->d_parent,
@@ -228,13 +222,9 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
228 printk(KERN_ERR "cfg80211: failed to rename debugfs dir to %s!\n", 222 printk(KERN_ERR "cfg80211: failed to rename debugfs dir to %s!\n",
229 newname); 223 newname);
230 224
231 result = 0; 225 nl80211_notify_dev_rename(rdev);
232out_unlock:
233 mutex_unlock(&cfg80211_mutex);
234 if (result == 0)
235 nl80211_notify_dev_rename(rdev);
236 226
237 return result; 227 return 0;
238} 228}
239 229
240/* exported functions */ 230/* exported functions */
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 6acd483a61f8..d43daa236ef9 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -90,6 +90,8 @@ struct cfg80211_internal_bss {
90 struct rb_node rbn; 90 struct rb_node rbn;
91 unsigned long ts; 91 unsigned long ts;
92 struct kref ref; 92 struct kref ref;
93 bool hold;
94
93 /* must be last because of priv member */ 95 /* must be last because of priv member */
94 struct cfg80211_bss pub; 96 struct cfg80211_bss pub;
95}; 97};
@@ -97,6 +99,9 @@ struct cfg80211_internal_bss {
97struct cfg80211_registered_device *cfg80211_drv_by_wiphy_idx(int wiphy_idx); 99struct cfg80211_registered_device *cfg80211_drv_by_wiphy_idx(int wiphy_idx);
98int get_wiphy_idx(struct wiphy *wiphy); 100int get_wiphy_idx(struct wiphy *wiphy);
99 101
102struct cfg80211_registered_device *
103__cfg80211_drv_from_info(struct genl_info *info);
104
100/* 105/*
101 * This function returns a pointer to the driver 106 * This function returns a pointer to the driver
102 * that the genl_info item that is passed refers to. 107 * that the genl_info item that is passed refers to.
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
new file mode 100644
index 000000000000..bec5721b6f99
--- /dev/null
+++ b/net/wireless/mlme.c
@@ -0,0 +1,46 @@
1/*
2 * cfg80211 MLME SAP interface
3 *
4 * Copyright (c) 2009, Jouni Malinen <j@w1.fi>
5 */
6
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <linux/netdevice.h>
10#include <linux/nl80211.h>
11#include <net/cfg80211.h>
12#include "core.h"
13#include "nl80211.h"
14
15void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len)
16{
17 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
18 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
19 nl80211_send_rx_auth(rdev, dev, buf, len);
20}
21EXPORT_SYMBOL(cfg80211_send_rx_auth);
22
23void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
24{
25 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
26 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
27 nl80211_send_rx_assoc(rdev, dev, buf, len);
28}
29EXPORT_SYMBOL(cfg80211_send_rx_assoc);
30
31void cfg80211_send_rx_deauth(struct net_device *dev, const u8 *buf, size_t len)
32{
33 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
34 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
35 nl80211_send_rx_deauth(rdev, dev, buf, len);
36}
37EXPORT_SYMBOL(cfg80211_send_rx_deauth);
38
39void cfg80211_send_rx_disassoc(struct net_device *dev, const u8 *buf,
40 size_t len)
41{
42 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
43 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
44 nl80211_send_rx_disassoc(rdev, dev, buf, len);
45}
46EXPORT_SYMBOL(cfg80211_send_rx_disassoc);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ab9d8f14e151..353e1a4ece83 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -111,6 +111,11 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
111 .len = IEEE80211_MAX_DATA_LEN }, 111 .len = IEEE80211_MAX_DATA_LEN },
112 [NL80211_ATTR_SCAN_FREQUENCIES] = { .type = NLA_NESTED }, 112 [NL80211_ATTR_SCAN_FREQUENCIES] = { .type = NLA_NESTED },
113 [NL80211_ATTR_SCAN_SSIDS] = { .type = NLA_NESTED }, 113 [NL80211_ATTR_SCAN_SSIDS] = { .type = NLA_NESTED },
114
115 [NL80211_ATTR_SSID] = { .type = NLA_BINARY,
116 .len = IEEE80211_MAX_SSID_LEN },
117 [NL80211_ATTR_AUTH_TYPE] = { .type = NLA_U32 },
118 [NL80211_ATTR_REASON_CODE] = { .type = NLA_U16 },
114}; 119};
115 120
116/* message building helper */ 121/* message building helper */
@@ -131,6 +136,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
131 struct nlattr *nl_freqs, *nl_freq; 136 struct nlattr *nl_freqs, *nl_freq;
132 struct nlattr *nl_rates, *nl_rate; 137 struct nlattr *nl_rates, *nl_rate;
133 struct nlattr *nl_modes; 138 struct nlattr *nl_modes;
139 struct nlattr *nl_cmds;
134 enum ieee80211_band band; 140 enum ieee80211_band band;
135 struct ieee80211_channel *chan; 141 struct ieee80211_channel *chan;
136 struct ieee80211_rate *rate; 142 struct ieee80211_rate *rate;
@@ -242,6 +248,35 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
242 } 248 }
243 nla_nest_end(msg, nl_bands); 249 nla_nest_end(msg, nl_bands);
244 250
251 nl_cmds = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_COMMANDS);
252 if (!nl_cmds)
253 goto nla_put_failure;
254
255 i = 0;
256#define CMD(op, n) \
257 do { \
258 if (dev->ops->op) { \
259 i++; \
260 NLA_PUT_U32(msg, i, NL80211_CMD_ ## n); \
261 } \
262 } while (0)
263
264 CMD(add_virtual_intf, NEW_INTERFACE);
265 CMD(change_virtual_intf, SET_INTERFACE);
266 CMD(add_key, NEW_KEY);
267 CMD(add_beacon, NEW_BEACON);
268 CMD(add_station, NEW_STATION);
269 CMD(add_mpath, NEW_MPATH);
270 CMD(set_mesh_params, SET_MESH_PARAMS);
271 CMD(change_bss, SET_BSS);
272 CMD(auth, AUTHENTICATE);
273 CMD(assoc, ASSOCIATE);
274 CMD(deauth, DEAUTHENTICATE);
275 CMD(disassoc, DISASSOCIATE);
276
277#undef CMD
278 nla_nest_end(msg, nl_cmds);
279
245 return genlmsg_end(msg, hdr); 280 return genlmsg_end(msg, hdr);
246 281
247 nla_put_failure: 282 nla_put_failure:
@@ -331,16 +366,26 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
331 int result = 0, rem_txq_params = 0; 366 int result = 0, rem_txq_params = 0;
332 struct nlattr *nl_txq_params; 367 struct nlattr *nl_txq_params;
333 368
334 rdev = cfg80211_get_dev_from_info(info); 369 rtnl_lock();
335 if (IS_ERR(rdev)) 370
336 return PTR_ERR(rdev); 371 mutex_lock(&cfg80211_mutex);
337 372
338 if (info->attrs[NL80211_ATTR_WIPHY_NAME]) { 373 rdev = __cfg80211_drv_from_info(info);
374 if (IS_ERR(rdev)) {
375 result = PTR_ERR(rdev);
376 goto unlock;
377 }
378
379 mutex_lock(&rdev->mtx);
380
381 if (info->attrs[NL80211_ATTR_WIPHY_NAME])
339 result = cfg80211_dev_rename( 382 result = cfg80211_dev_rename(
340 rdev, nla_data(info->attrs[NL80211_ATTR_WIPHY_NAME])); 383 rdev, nla_data(info->attrs[NL80211_ATTR_WIPHY_NAME]));
341 if (result) 384
342 goto bad_res; 385 mutex_unlock(&cfg80211_mutex);
343 } 386
387 if (result)
388 goto bad_res;
344 389
345 if (info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS]) { 390 if (info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS]) {
346 struct ieee80211_txq_params txq_params; 391 struct ieee80211_txq_params txq_params;
@@ -436,7 +481,9 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
436 481
437 482
438 bad_res: 483 bad_res:
439 cfg80211_put_dev(rdev); 484 mutex_unlock(&rdev->mtx);
485 unlock:
486 rtnl_unlock();
440 return result; 487 return result;
441} 488}
442 489
@@ -572,21 +619,31 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
572 enum nl80211_iftype type; 619 enum nl80211_iftype type;
573 struct net_device *dev; 620 struct net_device *dev;
574 u32 _flags, *flags = NULL; 621 u32 _flags, *flags = NULL;
622 bool change = false;
575 623
576 memset(&params, 0, sizeof(params)); 624 memset(&params, 0, sizeof(params));
577 625
626 rtnl_lock();
627
578 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 628 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
579 if (err) 629 if (err)
580 return err; 630 goto unlock_rtnl;
631
581 ifindex = dev->ifindex; 632 ifindex = dev->ifindex;
582 type = dev->ieee80211_ptr->iftype; 633 type = dev->ieee80211_ptr->iftype;
583 dev_put(dev); 634 dev_put(dev);
584 635
585 err = -EINVAL;
586 if (info->attrs[NL80211_ATTR_IFTYPE]) { 636 if (info->attrs[NL80211_ATTR_IFTYPE]) {
587 type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); 637 enum nl80211_iftype ntype;
588 if (type > NL80211_IFTYPE_MAX) 638
639 ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
640 if (type != ntype)
641 change = true;
642 type = ntype;
643 if (type > NL80211_IFTYPE_MAX) {
644 err = -EINVAL;
589 goto unlock; 645 goto unlock;
646 }
590 } 647 }
591 648
592 if (!drv->ops->change_virtual_intf || 649 if (!drv->ops->change_virtual_intf ||
@@ -602,6 +659,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
602 } 659 }
603 params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]); 660 params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]);
604 params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); 661 params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
662 change = true;
605 } 663 }
606 664
607 if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) { 665 if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) {
@@ -611,20 +669,26 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
611 } 669 }
612 err = parse_monitor_flags(info->attrs[NL80211_ATTR_MNTR_FLAGS], 670 err = parse_monitor_flags(info->attrs[NL80211_ATTR_MNTR_FLAGS],
613 &_flags); 671 &_flags);
614 if (!err) 672 if (err)
615 flags = &_flags; 673 goto unlock;
674
675 flags = &_flags;
676 change = true;
616 } 677 }
617 rtnl_lock(); 678
618 err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex, 679 if (change)
619 type, flags, &params); 680 err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex,
681 type, flags, &params);
682 else
683 err = 0;
620 684
621 dev = __dev_get_by_index(&init_net, ifindex); 685 dev = __dev_get_by_index(&init_net, ifindex);
622 WARN_ON(!dev || (!err && dev->ieee80211_ptr->iftype != type)); 686 WARN_ON(!dev || (!err && dev->ieee80211_ptr->iftype != type));
623 687
624 rtnl_unlock();
625
626 unlock: 688 unlock:
627 cfg80211_put_dev(drv); 689 cfg80211_put_dev(drv);
690 unlock_rtnl:
691 rtnl_unlock();
628 return err; 692 return err;
629} 693}
630 694
@@ -647,9 +711,13 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
647 return -EINVAL; 711 return -EINVAL;
648 } 712 }
649 713
714 rtnl_lock();
715
650 drv = cfg80211_get_dev_from_info(info); 716 drv = cfg80211_get_dev_from_info(info);
651 if (IS_ERR(drv)) 717 if (IS_ERR(drv)) {
652 return PTR_ERR(drv); 718 err = PTR_ERR(drv);
719 goto unlock_rtnl;
720 }
653 721
654 if (!drv->ops->add_virtual_intf || 722 if (!drv->ops->add_virtual_intf ||
655 !(drv->wiphy.interface_modes & (1 << type))) { 723 !(drv->wiphy.interface_modes & (1 << type))) {
@@ -663,18 +731,17 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
663 params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); 731 params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
664 } 732 }
665 733
666 rtnl_lock();
667 err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? 734 err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
668 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, 735 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
669 &flags); 736 &flags);
670 err = drv->ops->add_virtual_intf(&drv->wiphy, 737 err = drv->ops->add_virtual_intf(&drv->wiphy,
671 nla_data(info->attrs[NL80211_ATTR_IFNAME]), 738 nla_data(info->attrs[NL80211_ATTR_IFNAME]),
672 type, err ? NULL : &flags, &params); 739 type, err ? NULL : &flags, &params);
673 rtnl_unlock();
674
675 740
676 unlock: 741 unlock:
677 cfg80211_put_dev(drv); 742 cfg80211_put_dev(drv);
743 unlock_rtnl:
744 rtnl_unlock();
678 return err; 745 return err;
679} 746}
680 747
@@ -684,9 +751,11 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
684 int ifindex, err; 751 int ifindex, err;
685 struct net_device *dev; 752 struct net_device *dev;
686 753
754 rtnl_lock();
755
687 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 756 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
688 if (err) 757 if (err)
689 return err; 758 goto unlock_rtnl;
690 ifindex = dev->ifindex; 759 ifindex = dev->ifindex;
691 dev_put(dev); 760 dev_put(dev);
692 761
@@ -695,12 +764,12 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
695 goto out; 764 goto out;
696 } 765 }
697 766
698 rtnl_lock();
699 err = drv->ops->del_virtual_intf(&drv->wiphy, ifindex); 767 err = drv->ops->del_virtual_intf(&drv->wiphy, ifindex);
700 rtnl_unlock();
701 768
702 out: 769 out:
703 cfg80211_put_dev(drv); 770 cfg80211_put_dev(drv);
771 unlock_rtnl:
772 rtnl_unlock();
704 return err; 773 return err;
705} 774}
706 775
@@ -752,9 +821,11 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
752 if (info->attrs[NL80211_ATTR_MAC]) 821 if (info->attrs[NL80211_ATTR_MAC])
753 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 822 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
754 823
824 rtnl_lock();
825
755 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 826 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
756 if (err) 827 if (err)
757 return err; 828 goto unlock_rtnl;
758 829
759 if (!drv->ops->get_key) { 830 if (!drv->ops->get_key) {
760 err = -EOPNOTSUPP; 831 err = -EOPNOTSUPP;
@@ -782,10 +853,8 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
782 if (mac_addr) 853 if (mac_addr)
783 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); 854 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
784 855
785 rtnl_lock();
786 err = drv->ops->get_key(&drv->wiphy, dev, key_idx, mac_addr, 856 err = drv->ops->get_key(&drv->wiphy, dev, key_idx, mac_addr,
787 &cookie, get_key_callback); 857 &cookie, get_key_callback);
788 rtnl_unlock();
789 858
790 if (err) 859 if (err)
791 goto out; 860 goto out;
@@ -803,6 +872,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
803 out: 872 out:
804 cfg80211_put_dev(drv); 873 cfg80211_put_dev(drv);
805 dev_put(dev); 874 dev_put(dev);
875 unlock_rtnl:
876 rtnl_unlock();
877
806 return err; 878 return err;
807} 879}
808 880
@@ -831,9 +903,11 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
831 !info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT]) 903 !info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT])
832 return -EINVAL; 904 return -EINVAL;
833 905
906 rtnl_lock();
907
834 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 908 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
835 if (err) 909 if (err)
836 return err; 910 goto unlock_rtnl;
837 911
838 if (info->attrs[NL80211_ATTR_KEY_DEFAULT]) 912 if (info->attrs[NL80211_ATTR_KEY_DEFAULT])
839 func = drv->ops->set_default_key; 913 func = drv->ops->set_default_key;
@@ -845,13 +919,15 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
845 goto out; 919 goto out;
846 } 920 }
847 921
848 rtnl_lock();
849 err = func(&drv->wiphy, dev, key_idx); 922 err = func(&drv->wiphy, dev, key_idx);
850 rtnl_unlock();
851 923
852 out: 924 out:
853 cfg80211_put_dev(drv); 925 cfg80211_put_dev(drv);
854 dev_put(dev); 926 dev_put(dev);
927
928 unlock_rtnl:
929 rtnl_unlock();
930
855 return err; 931 return err;
856} 932}
857 933
@@ -921,22 +997,25 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
921 return -EINVAL; 997 return -EINVAL;
922 } 998 }
923 999
1000 rtnl_lock();
1001
924 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 1002 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
925 if (err) 1003 if (err)
926 return err; 1004 goto unlock_rtnl;
927 1005
928 if (!drv->ops->add_key) { 1006 if (!drv->ops->add_key) {
929 err = -EOPNOTSUPP; 1007 err = -EOPNOTSUPP;
930 goto out; 1008 goto out;
931 } 1009 }
932 1010
933 rtnl_lock();
934 err = drv->ops->add_key(&drv->wiphy, dev, key_idx, mac_addr, &params); 1011 err = drv->ops->add_key(&drv->wiphy, dev, key_idx, mac_addr, &params);
935 rtnl_unlock();
936 1012
937 out: 1013 out:
938 cfg80211_put_dev(drv); 1014 cfg80211_put_dev(drv);
939 dev_put(dev); 1015 dev_put(dev);
1016 unlock_rtnl:
1017 rtnl_unlock();
1018
940 return err; 1019 return err;
941} 1020}
942 1021
@@ -957,22 +1036,26 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
957 if (info->attrs[NL80211_ATTR_MAC]) 1036 if (info->attrs[NL80211_ATTR_MAC])
958 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 1037 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
959 1038
1039 rtnl_lock();
1040
960 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 1041 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
961 if (err) 1042 if (err)
962 return err; 1043 goto unlock_rtnl;
963 1044
964 if (!drv->ops->del_key) { 1045 if (!drv->ops->del_key) {
965 err = -EOPNOTSUPP; 1046 err = -EOPNOTSUPP;
966 goto out; 1047 goto out;
967 } 1048 }
968 1049
969 rtnl_lock();
970 err = drv->ops->del_key(&drv->wiphy, dev, key_idx, mac_addr); 1050 err = drv->ops->del_key(&drv->wiphy, dev, key_idx, mac_addr);
971 rtnl_unlock();
972 1051
973 out: 1052 out:
974 cfg80211_put_dev(drv); 1053 cfg80211_put_dev(drv);
975 dev_put(dev); 1054 dev_put(dev);
1055
1056 unlock_rtnl:
1057 rtnl_unlock();
1058
976 return err; 1059 return err;
977} 1060}
978 1061
@@ -986,9 +1069,16 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
986 struct beacon_parameters params; 1069 struct beacon_parameters params;
987 int haveinfo = 0; 1070 int haveinfo = 0;
988 1071
1072 rtnl_lock();
1073
989 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 1074 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
990 if (err) 1075 if (err)
991 return err; 1076 goto unlock_rtnl;
1077
1078 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
1079 err = -EOPNOTSUPP;
1080 goto out;
1081 }
992 1082
993 switch (info->genlhdr->cmd) { 1083 switch (info->genlhdr->cmd) {
994 case NL80211_CMD_NEW_BEACON: 1084 case NL80211_CMD_NEW_BEACON:
@@ -1049,13 +1139,14 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
1049 goto out; 1139 goto out;
1050 } 1140 }
1051 1141
1052 rtnl_lock();
1053 err = call(&drv->wiphy, dev, &params); 1142 err = call(&drv->wiphy, dev, &params);
1054 rtnl_unlock();
1055 1143
1056 out: 1144 out:
1057 cfg80211_put_dev(drv); 1145 cfg80211_put_dev(drv);
1058 dev_put(dev); 1146 dev_put(dev);
1147 unlock_rtnl:
1148 rtnl_unlock();
1149
1059 return err; 1150 return err;
1060} 1151}
1061 1152
@@ -1065,22 +1156,29 @@ static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info)
1065 int err; 1156 int err;
1066 struct net_device *dev; 1157 struct net_device *dev;
1067 1158
1159 rtnl_lock();
1160
1068 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 1161 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
1069 if (err) 1162 if (err)
1070 return err; 1163 goto unlock_rtnl;
1071 1164
1072 if (!drv->ops->del_beacon) { 1165 if (!drv->ops->del_beacon) {
1073 err = -EOPNOTSUPP; 1166 err = -EOPNOTSUPP;
1074 goto out; 1167 goto out;
1075 } 1168 }
1076 1169
1077 rtnl_lock(); 1170 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
1171 err = -EOPNOTSUPP;
1172 goto out;
1173 }
1078 err = drv->ops->del_beacon(&drv->wiphy, dev); 1174 err = drv->ops->del_beacon(&drv->wiphy, dev);
1079 rtnl_unlock();
1080 1175
1081 out: 1176 out:
1082 cfg80211_put_dev(drv); 1177 cfg80211_put_dev(drv);
1083 dev_put(dev); 1178 dev_put(dev);
1179 unlock_rtnl:
1180 rtnl_unlock();
1181
1084 return err; 1182 return err;
1085} 1183}
1086 1184
@@ -1246,30 +1344,32 @@ static int nl80211_dump_station(struct sk_buff *skb,
1246 return -EINVAL; 1344 return -EINVAL;
1247 } 1345 }
1248 1346
1249 netdev = dev_get_by_index(&init_net, ifidx); 1347 rtnl_lock();
1250 if (!netdev) 1348
1251 return -ENODEV; 1349 netdev = __dev_get_by_index(&init_net, ifidx);
1350 if (!netdev) {
1351 err = -ENODEV;
1352 goto out_rtnl;
1353 }
1252 1354
1253 dev = cfg80211_get_dev_from_ifindex(ifidx); 1355 dev = cfg80211_get_dev_from_ifindex(ifidx);
1254 if (IS_ERR(dev)) { 1356 if (IS_ERR(dev)) {
1255 err = PTR_ERR(dev); 1357 err = PTR_ERR(dev);
1256 goto out_put_netdev; 1358 goto out_rtnl;
1257 } 1359 }
1258 1360
1259 if (!dev->ops->dump_station) { 1361 if (!dev->ops->dump_station) {
1260 err = -ENOSYS; 1362 err = -EOPNOTSUPP;
1261 goto out_err; 1363 goto out_err;
1262 } 1364 }
1263 1365
1264 rtnl_lock();
1265
1266 while (1) { 1366 while (1) {
1267 err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx, 1367 err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx,
1268 mac_addr, &sinfo); 1368 mac_addr, &sinfo);
1269 if (err == -ENOENT) 1369 if (err == -ENOENT)
1270 break; 1370 break;
1271 if (err) 1371 if (err)
1272 goto out_err_rtnl; 1372 goto out_err;
1273 1373
1274 if (nl80211_send_station(skb, 1374 if (nl80211_send_station(skb,
1275 NETLINK_CB(cb->skb).pid, 1375 NETLINK_CB(cb->skb).pid,
@@ -1285,12 +1385,10 @@ static int nl80211_dump_station(struct sk_buff *skb,
1285 out: 1385 out:
1286 cb->args[1] = sta_idx; 1386 cb->args[1] = sta_idx;
1287 err = skb->len; 1387 err = skb->len;
1288 out_err_rtnl:
1289 rtnl_unlock();
1290 out_err: 1388 out_err:
1291 cfg80211_put_dev(dev); 1389 cfg80211_put_dev(dev);
1292 out_put_netdev: 1390 out_rtnl:
1293 dev_put(netdev); 1391 rtnl_unlock();
1294 1392
1295 return err; 1393 return err;
1296} 1394}
@@ -1311,19 +1409,18 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
1311 1409
1312 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 1410 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
1313 1411
1412 rtnl_lock();
1413
1314 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 1414 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
1315 if (err) 1415 if (err)
1316 return err; 1416 goto out_rtnl;
1317 1417
1318 if (!drv->ops->get_station) { 1418 if (!drv->ops->get_station) {
1319 err = -EOPNOTSUPP; 1419 err = -EOPNOTSUPP;
1320 goto out; 1420 goto out;
1321 } 1421 }
1322 1422
1323 rtnl_lock();
1324 err = drv->ops->get_station(&drv->wiphy, dev, mac_addr, &sinfo); 1423 err = drv->ops->get_station(&drv->wiphy, dev, mac_addr, &sinfo);
1325 rtnl_unlock();
1326
1327 if (err) 1424 if (err)
1328 goto out; 1425 goto out;
1329 1426
@@ -1340,10 +1437,12 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
1340 1437
1341 out_free: 1438 out_free:
1342 nlmsg_free(msg); 1439 nlmsg_free(msg);
1343
1344 out: 1440 out:
1345 cfg80211_put_dev(drv); 1441 cfg80211_put_dev(drv);
1346 dev_put(dev); 1442 dev_put(dev);
1443 out_rtnl:
1444 rtnl_unlock();
1445
1347 return err; 1446 return err;
1348} 1447}
1349 1448
@@ -1411,9 +1510,11 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
1411 params.plink_action = 1510 params.plink_action =
1412 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); 1511 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
1413 1512
1513 rtnl_lock();
1514
1414 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 1515 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
1415 if (err) 1516 if (err)
1416 return err; 1517 goto out_rtnl;
1417 1518
1418 err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan); 1519 err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
1419 if (err) 1520 if (err)
@@ -1424,15 +1525,16 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
1424 goto out; 1525 goto out;
1425 } 1526 }
1426 1527
1427 rtnl_lock();
1428 err = drv->ops->change_station(&drv->wiphy, dev, mac_addr, &params); 1528 err = drv->ops->change_station(&drv->wiphy, dev, mac_addr, &params);
1429 rtnl_unlock();
1430 1529
1431 out: 1530 out:
1432 if (params.vlan) 1531 if (params.vlan)
1433 dev_put(params.vlan); 1532 dev_put(params.vlan);
1434 cfg80211_put_dev(drv); 1533 cfg80211_put_dev(drv);
1435 dev_put(dev); 1534 dev_put(dev);
1535 out_rtnl:
1536 rtnl_unlock();
1537
1436 return err; 1538 return err;
1437} 1539}
1438 1540
@@ -1474,9 +1576,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
1474 &params.station_flags)) 1576 &params.station_flags))
1475 return -EINVAL; 1577 return -EINVAL;
1476 1578
1579 rtnl_lock();
1580
1477 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 1581 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
1478 if (err) 1582 if (err)
1479 return err; 1583 goto out_rtnl;
1480 1584
1481 err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan); 1585 err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
1482 if (err) 1586 if (err)
@@ -1487,15 +1591,21 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
1487 goto out; 1591 goto out;
1488 } 1592 }
1489 1593
1490 rtnl_lock(); 1594 if (!netif_running(dev)) {
1595 err = -ENETDOWN;
1596 goto out;
1597 }
1598
1491 err = drv->ops->add_station(&drv->wiphy, dev, mac_addr, &params); 1599 err = drv->ops->add_station(&drv->wiphy, dev, mac_addr, &params);
1492 rtnl_unlock();
1493 1600
1494 out: 1601 out:
1495 if (params.vlan) 1602 if (params.vlan)
1496 dev_put(params.vlan); 1603 dev_put(params.vlan);
1497 cfg80211_put_dev(drv); 1604 cfg80211_put_dev(drv);
1498 dev_put(dev); 1605 dev_put(dev);
1606 out_rtnl:
1607 rtnl_unlock();
1608
1499 return err; 1609 return err;
1500} 1610}
1501 1611
@@ -1509,22 +1619,25 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
1509 if (info->attrs[NL80211_ATTR_MAC]) 1619 if (info->attrs[NL80211_ATTR_MAC])
1510 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 1620 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
1511 1621
1622 rtnl_lock();
1623
1512 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 1624 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
1513 if (err) 1625 if (err)
1514 return err; 1626 goto out_rtnl;
1515 1627
1516 if (!drv->ops->del_station) { 1628 if (!drv->ops->del_station) {
1517 err = -EOPNOTSUPP; 1629 err = -EOPNOTSUPP;
1518 goto out; 1630 goto out;
1519 } 1631 }
1520 1632
1521 rtnl_lock();
1522 err = drv->ops->del_station(&drv->wiphy, dev, mac_addr); 1633 err = drv->ops->del_station(&drv->wiphy, dev, mac_addr);
1523 rtnl_unlock();
1524 1634
1525 out: 1635 out:
1526 cfg80211_put_dev(drv); 1636 cfg80211_put_dev(drv);
1527 dev_put(dev); 1637 dev_put(dev);
1638 out_rtnl:
1639 rtnl_unlock();
1640
1528 return err; 1641 return err;
1529} 1642}
1530 1643
@@ -1605,22 +1718,29 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
1605 return -EINVAL; 1718 return -EINVAL;
1606 } 1719 }
1607 1720
1608 netdev = dev_get_by_index(&init_net, ifidx); 1721 rtnl_lock();
1609 if (!netdev) 1722
1610 return -ENODEV; 1723 netdev = __dev_get_by_index(&init_net, ifidx);
1724 if (!netdev) {
1725 err = -ENODEV;
1726 goto out_rtnl;
1727 }
1611 1728
1612 dev = cfg80211_get_dev_from_ifindex(ifidx); 1729 dev = cfg80211_get_dev_from_ifindex(ifidx);
1613 if (IS_ERR(dev)) { 1730 if (IS_ERR(dev)) {
1614 err = PTR_ERR(dev); 1731 err = PTR_ERR(dev);
1615 goto out_put_netdev; 1732 goto out_rtnl;
1616 } 1733 }
1617 1734
1618 if (!dev->ops->dump_mpath) { 1735 if (!dev->ops->dump_mpath) {
1619 err = -ENOSYS; 1736 err = -EOPNOTSUPP;
1620 goto out_err; 1737 goto out_err;
1621 } 1738 }
1622 1739
1623 rtnl_lock(); 1740 if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
1741 err = -EOPNOTSUPP;
1742 goto out;
1743 }
1624 1744
1625 while (1) { 1745 while (1) {
1626 err = dev->ops->dump_mpath(&dev->wiphy, netdev, path_idx, 1746 err = dev->ops->dump_mpath(&dev->wiphy, netdev, path_idx,
@@ -1628,7 +1748,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
1628 if (err == -ENOENT) 1748 if (err == -ENOENT)
1629 break; 1749 break;
1630 if (err) 1750 if (err)
1631 goto out_err_rtnl; 1751 goto out_err;
1632 1752
1633 if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).pid, 1753 if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).pid,
1634 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1754 cb->nlh->nlmsg_seq, NLM_F_MULTI,
@@ -1643,12 +1763,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
1643 out: 1763 out:
1644 cb->args[1] = path_idx; 1764 cb->args[1] = path_idx;
1645 err = skb->len; 1765 err = skb->len;
1646 out_err_rtnl:
1647 rtnl_unlock();
1648 out_err: 1766 out_err:
1649 cfg80211_put_dev(dev); 1767 cfg80211_put_dev(dev);
1650 out_put_netdev: 1768 out_rtnl:
1651 dev_put(netdev); 1769 rtnl_unlock();
1652 1770
1653 return err; 1771 return err;
1654} 1772}
@@ -1670,19 +1788,23 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
1670 1788
1671 dst = nla_data(info->attrs[NL80211_ATTR_MAC]); 1789 dst = nla_data(info->attrs[NL80211_ATTR_MAC]);
1672 1790
1791 rtnl_lock();
1792
1673 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 1793 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
1674 if (err) 1794 if (err)
1675 return err; 1795 goto out_rtnl;
1676 1796
1677 if (!drv->ops->get_mpath) { 1797 if (!drv->ops->get_mpath) {
1678 err = -EOPNOTSUPP; 1798 err = -EOPNOTSUPP;
1679 goto out; 1799 goto out;
1680 } 1800 }
1681 1801
1682 rtnl_lock(); 1802 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
1683 err = drv->ops->get_mpath(&drv->wiphy, dev, dst, next_hop, &pinfo); 1803 err = -EOPNOTSUPP;
1684 rtnl_unlock(); 1804 goto out;
1805 }
1685 1806
1807 err = drv->ops->get_mpath(&drv->wiphy, dev, dst, next_hop, &pinfo);
1686 if (err) 1808 if (err)
1687 goto out; 1809 goto out;
1688 1810
@@ -1699,10 +1821,12 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
1699 1821
1700 out_free: 1822 out_free:
1701 nlmsg_free(msg); 1823 nlmsg_free(msg);
1702
1703 out: 1824 out:
1704 cfg80211_put_dev(drv); 1825 cfg80211_put_dev(drv);
1705 dev_put(dev); 1826 dev_put(dev);
1827 out_rtnl:
1828 rtnl_unlock();
1829
1706 return err; 1830 return err;
1707} 1831}
1708 1832
@@ -1723,22 +1847,35 @@ static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info)
1723 dst = nla_data(info->attrs[NL80211_ATTR_MAC]); 1847 dst = nla_data(info->attrs[NL80211_ATTR_MAC]);
1724 next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]); 1848 next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]);
1725 1849
1850 rtnl_lock();
1851
1726 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 1852 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
1727 if (err) 1853 if (err)
1728 return err; 1854 goto out_rtnl;
1729 1855
1730 if (!drv->ops->change_mpath) { 1856 if (!drv->ops->change_mpath) {
1731 err = -EOPNOTSUPP; 1857 err = -EOPNOTSUPP;
1732 goto out; 1858 goto out;
1733 } 1859 }
1734 1860
1735 rtnl_lock(); 1861 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
1862 err = -EOPNOTSUPP;
1863 goto out;
1864 }
1865
1866 if (!netif_running(dev)) {
1867 err = -ENETDOWN;
1868 goto out;
1869 }
1870
1736 err = drv->ops->change_mpath(&drv->wiphy, dev, dst, next_hop); 1871 err = drv->ops->change_mpath(&drv->wiphy, dev, dst, next_hop);
1737 rtnl_unlock();
1738 1872
1739 out: 1873 out:
1740 cfg80211_put_dev(drv); 1874 cfg80211_put_dev(drv);
1741 dev_put(dev); 1875 dev_put(dev);
1876 out_rtnl:
1877 rtnl_unlock();
1878
1742 return err; 1879 return err;
1743} 1880}
1744static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) 1881static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info)
@@ -1758,22 +1895,35 @@ static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info)
1758 dst = nla_data(info->attrs[NL80211_ATTR_MAC]); 1895 dst = nla_data(info->attrs[NL80211_ATTR_MAC]);
1759 next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]); 1896 next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]);
1760 1897
1898 rtnl_lock();
1899
1761 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 1900 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
1762 if (err) 1901 if (err)
1763 return err; 1902 goto out_rtnl;
1764 1903
1765 if (!drv->ops->add_mpath) { 1904 if (!drv->ops->add_mpath) {
1766 err = -EOPNOTSUPP; 1905 err = -EOPNOTSUPP;
1767 goto out; 1906 goto out;
1768 } 1907 }
1769 1908
1770 rtnl_lock(); 1909 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
1910 err = -EOPNOTSUPP;
1911 goto out;
1912 }
1913
1914 if (!netif_running(dev)) {
1915 err = -ENETDOWN;
1916 goto out;
1917 }
1918
1771 err = drv->ops->add_mpath(&drv->wiphy, dev, dst, next_hop); 1919 err = drv->ops->add_mpath(&drv->wiphy, dev, dst, next_hop);
1772 rtnl_unlock();
1773 1920
1774 out: 1921 out:
1775 cfg80211_put_dev(drv); 1922 cfg80211_put_dev(drv);
1776 dev_put(dev); 1923 dev_put(dev);
1924 out_rtnl:
1925 rtnl_unlock();
1926
1777 return err; 1927 return err;
1778} 1928}
1779 1929
@@ -1787,22 +1937,25 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
1787 if (info->attrs[NL80211_ATTR_MAC]) 1937 if (info->attrs[NL80211_ATTR_MAC])
1788 dst = nla_data(info->attrs[NL80211_ATTR_MAC]); 1938 dst = nla_data(info->attrs[NL80211_ATTR_MAC]);
1789 1939
1940 rtnl_lock();
1941
1790 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 1942 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
1791 if (err) 1943 if (err)
1792 return err; 1944 goto out_rtnl;
1793 1945
1794 if (!drv->ops->del_mpath) { 1946 if (!drv->ops->del_mpath) {
1795 err = -EOPNOTSUPP; 1947 err = -EOPNOTSUPP;
1796 goto out; 1948 goto out;
1797 } 1949 }
1798 1950
1799 rtnl_lock();
1800 err = drv->ops->del_mpath(&drv->wiphy, dev, dst); 1951 err = drv->ops->del_mpath(&drv->wiphy, dev, dst);
1801 rtnl_unlock();
1802 1952
1803 out: 1953 out:
1804 cfg80211_put_dev(drv); 1954 cfg80211_put_dev(drv);
1805 dev_put(dev); 1955 dev_put(dev);
1956 out_rtnl:
1957 rtnl_unlock();
1958
1806 return err; 1959 return err;
1807} 1960}
1808 1961
@@ -1835,22 +1988,30 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
1835 nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); 1988 nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
1836 } 1989 }
1837 1990
1991 rtnl_lock();
1992
1838 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 1993 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
1839 if (err) 1994 if (err)
1840 return err; 1995 goto out_rtnl;
1841 1996
1842 if (!drv->ops->change_bss) { 1997 if (!drv->ops->change_bss) {
1843 err = -EOPNOTSUPP; 1998 err = -EOPNOTSUPP;
1844 goto out; 1999 goto out;
1845 } 2000 }
1846 2001
1847 rtnl_lock(); 2002 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
2003 err = -EOPNOTSUPP;
2004 goto out;
2005 }
2006
1848 err = drv->ops->change_bss(&drv->wiphy, dev, &params); 2007 err = drv->ops->change_bss(&drv->wiphy, dev, &params);
1849 rtnl_unlock();
1850 2008
1851 out: 2009 out:
1852 cfg80211_put_dev(drv); 2010 cfg80211_put_dev(drv);
1853 dev_put(dev); 2011 dev_put(dev);
2012 out_rtnl:
2013 rtnl_unlock();
2014
1854 return err; 2015 return err;
1855} 2016}
1856 2017
@@ -1945,10 +2106,12 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
1945 struct nlattr *pinfoattr; 2106 struct nlattr *pinfoattr;
1946 struct sk_buff *msg; 2107 struct sk_buff *msg;
1947 2108
2109 rtnl_lock();
2110
1948 /* Look up our device */ 2111 /* Look up our device */
1949 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 2112 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
1950 if (err) 2113 if (err)
1951 return err; 2114 goto out_rtnl;
1952 2115
1953 if (!drv->ops->get_mesh_params) { 2116 if (!drv->ops->get_mesh_params) {
1954 err = -EOPNOTSUPP; 2117 err = -EOPNOTSUPP;
@@ -1956,9 +2119,7 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
1956 } 2119 }
1957 2120
1958 /* Get the mesh params */ 2121 /* Get the mesh params */
1959 rtnl_lock();
1960 err = drv->ops->get_mesh_params(&drv->wiphy, dev, &cur_params); 2122 err = drv->ops->get_mesh_params(&drv->wiphy, dev, &cur_params);
1961 rtnl_unlock();
1962 if (err) 2123 if (err)
1963 goto out; 2124 goto out;
1964 2125
@@ -2007,13 +2168,16 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
2007 err = genlmsg_unicast(msg, info->snd_pid); 2168 err = genlmsg_unicast(msg, info->snd_pid);
2008 goto out; 2169 goto out;
2009 2170
2010nla_put_failure: 2171 nla_put_failure:
2011 genlmsg_cancel(msg, hdr); 2172 genlmsg_cancel(msg, hdr);
2012 err = -EMSGSIZE; 2173 err = -EMSGSIZE;
2013out: 2174 out:
2014 /* Cleanup */ 2175 /* Cleanup */
2015 cfg80211_put_dev(drv); 2176 cfg80211_put_dev(drv);
2016 dev_put(dev); 2177 dev_put(dev);
2178 out_rtnl:
2179 rtnl_unlock();
2180
2017 return err; 2181 return err;
2018} 2182}
2019 2183
@@ -2060,9 +2224,11 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info)
2060 parent_attr, nl80211_meshconf_params_policy)) 2224 parent_attr, nl80211_meshconf_params_policy))
2061 return -EINVAL; 2225 return -EINVAL;
2062 2226
2227 rtnl_lock();
2228
2063 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 2229 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
2064 if (err) 2230 if (err)
2065 return err; 2231 goto out_rtnl;
2066 2232
2067 if (!drv->ops->set_mesh_params) { 2233 if (!drv->ops->set_mesh_params) {
2068 err = -EOPNOTSUPP; 2234 err = -EOPNOTSUPP;
@@ -2109,14 +2275,15 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info)
2109 nla_get_u16); 2275 nla_get_u16);
2110 2276
2111 /* Apply changes */ 2277 /* Apply changes */
2112 rtnl_lock();
2113 err = drv->ops->set_mesh_params(&drv->wiphy, dev, &cfg, mask); 2278 err = drv->ops->set_mesh_params(&drv->wiphy, dev, &cfg, mask);
2114 rtnl_unlock();
2115 2279
2116 out: 2280 out:
2117 /* cleanup */ 2281 /* cleanup */
2118 cfg80211_put_dev(drv); 2282 cfg80211_put_dev(drv);
2119 dev_put(dev); 2283 dev_put(dev);
2284 out_rtnl:
2285 rtnl_unlock();
2286
2120 return err; 2287 return err;
2121} 2288}
2122 2289
@@ -2262,43 +2429,6 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
2262 return -EINVAL; 2429 return -EINVAL;
2263} 2430}
2264 2431
2265static int nl80211_set_mgmt_extra_ie(struct sk_buff *skb,
2266 struct genl_info *info)
2267{
2268 struct cfg80211_registered_device *drv;
2269 int err;
2270 struct net_device *dev;
2271 struct mgmt_extra_ie_params params;
2272
2273 memset(&params, 0, sizeof(params));
2274
2275 if (!info->attrs[NL80211_ATTR_MGMT_SUBTYPE])
2276 return -EINVAL;
2277 params.subtype = nla_get_u8(info->attrs[NL80211_ATTR_MGMT_SUBTYPE]);
2278 if (params.subtype > 15)
2279 return -EINVAL; /* FC Subtype field is 4 bits (0..15) */
2280
2281 if (info->attrs[NL80211_ATTR_IE]) {
2282 params.ies = nla_data(info->attrs[NL80211_ATTR_IE]);
2283 params.ies_len = nla_len(info->attrs[NL80211_ATTR_IE]);
2284 }
2285
2286 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
2287 if (err)
2288 return err;
2289
2290 if (drv->ops->set_mgmt_extra_ie) {
2291 rtnl_lock();
2292 err = drv->ops->set_mgmt_extra_ie(&drv->wiphy, dev, &params);
2293 rtnl_unlock();
2294 } else
2295 err = -EOPNOTSUPP;
2296
2297 cfg80211_put_dev(drv);
2298 dev_put(dev);
2299 return err;
2300}
2301
2302static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) 2432static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
2303{ 2433{
2304 struct cfg80211_registered_device *drv; 2434 struct cfg80211_registered_device *drv;
@@ -2312,9 +2442,11 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
2312 enum ieee80211_band band; 2442 enum ieee80211_band band;
2313 size_t ie_len; 2443 size_t ie_len;
2314 2444
2445 rtnl_lock();
2446
2315 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 2447 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
2316 if (err) 2448 if (err)
2317 return err; 2449 goto out_rtnl;
2318 2450
2319 wiphy = &drv->wiphy; 2451 wiphy = &drv->wiphy;
2320 2452
@@ -2323,11 +2455,14 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
2323 goto out; 2455 goto out;
2324 } 2456 }
2325 2457
2326 rtnl_lock(); 2458 if (!netif_running(dev)) {
2459 err = -ENETDOWN;
2460 goto out;
2461 }
2327 2462
2328 if (drv->scan_req) { 2463 if (drv->scan_req) {
2329 err = -EBUSY; 2464 err = -EBUSY;
2330 goto out_unlock; 2465 goto out;
2331 } 2466 }
2332 2467
2333 if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { 2468 if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
@@ -2335,7 +2470,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
2335 n_channels++; 2470 n_channels++;
2336 if (!n_channels) { 2471 if (!n_channels) {
2337 err = -EINVAL; 2472 err = -EINVAL;
2338 goto out_unlock; 2473 goto out;
2339 } 2474 }
2340 } else { 2475 } else {
2341 for (band = 0; band < IEEE80211_NUM_BANDS; band++) 2476 for (band = 0; band < IEEE80211_NUM_BANDS; band++)
@@ -2349,7 +2484,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
2349 2484
2350 if (n_ssids > wiphy->max_scan_ssids) { 2485 if (n_ssids > wiphy->max_scan_ssids) {
2351 err = -EINVAL; 2486 err = -EINVAL;
2352 goto out_unlock; 2487 goto out;
2353 } 2488 }
2354 2489
2355 if (info->attrs[NL80211_ATTR_IE]) 2490 if (info->attrs[NL80211_ATTR_IE])
@@ -2363,7 +2498,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
2363 + ie_len, GFP_KERNEL); 2498 + ie_len, GFP_KERNEL);
2364 if (!request) { 2499 if (!request) {
2365 err = -ENOMEM; 2500 err = -ENOMEM;
2366 goto out_unlock; 2501 goto out;
2367 } 2502 }
2368 2503
2369 request->channels = (void *)((char *)request + sizeof(*request)); 2504 request->channels = (void *)((char *)request + sizeof(*request));
@@ -2434,11 +2569,12 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
2434 drv->scan_req = NULL; 2569 drv->scan_req = NULL;
2435 kfree(request); 2570 kfree(request);
2436 } 2571 }
2437 out_unlock:
2438 rtnl_unlock();
2439 out: 2572 out:
2440 cfg80211_put_dev(drv); 2573 cfg80211_put_dev(drv);
2441 dev_put(dev); 2574 dev_put(dev);
2575 out_rtnl:
2576 rtnl_unlock();
2577
2442 return err; 2578 return err;
2443} 2579}
2444 2580
@@ -2558,6 +2694,288 @@ static int nl80211_dump_scan(struct sk_buff *skb,
2558 return err; 2694 return err;
2559} 2695}
2560 2696
2697static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type)
2698{
2699 return auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM ||
2700 auth_type == NL80211_AUTHTYPE_SHARED_KEY ||
2701 auth_type == NL80211_AUTHTYPE_FT ||
2702 auth_type == NL80211_AUTHTYPE_NETWORK_EAP;
2703}
2704
2705static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
2706{
2707 struct cfg80211_registered_device *drv;
2708 struct net_device *dev;
2709 struct cfg80211_auth_request req;
2710 struct wiphy *wiphy;
2711 int err;
2712
2713 rtnl_lock();
2714
2715 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
2716 if (err)
2717 goto unlock_rtnl;
2718
2719 if (!drv->ops->auth) {
2720 err = -EOPNOTSUPP;
2721 goto out;
2722 }
2723
2724 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
2725 err = -EOPNOTSUPP;
2726 goto out;
2727 }
2728
2729 if (!netif_running(dev)) {
2730 err = -ENETDOWN;
2731 goto out;
2732 }
2733
2734 if (!info->attrs[NL80211_ATTR_MAC]) {
2735 err = -EINVAL;
2736 goto out;
2737 }
2738
2739 wiphy = &drv->wiphy;
2740 memset(&req, 0, sizeof(req));
2741
2742 req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
2743
2744 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
2745 req.chan = ieee80211_get_channel(
2746 wiphy,
2747 nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
2748 if (!req.chan) {
2749 err = -EINVAL;
2750 goto out;
2751 }
2752 }
2753
2754 if (info->attrs[NL80211_ATTR_SSID]) {
2755 req.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
2756 req.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
2757 }
2758
2759 if (info->attrs[NL80211_ATTR_IE]) {
2760 req.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
2761 req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
2762 }
2763
2764 if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
2765 req.auth_type =
2766 nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
2767 if (!nl80211_valid_auth_type(req.auth_type)) {
2768 err = -EINVAL;
2769 goto out;
2770 }
2771 }
2772
2773 err = drv->ops->auth(&drv->wiphy, dev, &req);
2774
2775out:
2776 cfg80211_put_dev(drv);
2777 dev_put(dev);
2778unlock_rtnl:
2779 rtnl_unlock();
2780 return err;
2781}
2782
2783static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
2784{
2785 struct cfg80211_registered_device *drv;
2786 struct net_device *dev;
2787 struct cfg80211_assoc_request req;
2788 struct wiphy *wiphy;
2789 int err;
2790
2791 rtnl_lock();
2792
2793 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
2794 if (err)
2795 goto unlock_rtnl;
2796
2797 if (!drv->ops->assoc) {
2798 err = -EOPNOTSUPP;
2799 goto out;
2800 }
2801
2802 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
2803 err = -EOPNOTSUPP;
2804 goto out;
2805 }
2806
2807 if (!netif_running(dev)) {
2808 err = -ENETDOWN;
2809 goto out;
2810 }
2811
2812 if (!info->attrs[NL80211_ATTR_MAC] ||
2813 !info->attrs[NL80211_ATTR_SSID]) {
2814 err = -EINVAL;
2815 goto out;
2816 }
2817
2818 wiphy = &drv->wiphy;
2819 memset(&req, 0, sizeof(req));
2820
2821 req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
2822
2823 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
2824 req.chan = ieee80211_get_channel(
2825 wiphy,
2826 nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
2827 if (!req.chan) {
2828 err = -EINVAL;
2829 goto out;
2830 }
2831 }
2832
2833 req.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
2834 req.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
2835
2836 if (info->attrs[NL80211_ATTR_IE]) {
2837 req.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
2838 req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
2839 }
2840
2841 err = drv->ops->assoc(&drv->wiphy, dev, &req);
2842
2843out:
2844 cfg80211_put_dev(drv);
2845 dev_put(dev);
2846unlock_rtnl:
2847 rtnl_unlock();
2848 return err;
2849}
2850
2851static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
2852{
2853 struct cfg80211_registered_device *drv;
2854 struct net_device *dev;
2855 struct cfg80211_deauth_request req;
2856 struct wiphy *wiphy;
2857 int err;
2858
2859 rtnl_lock();
2860
2861 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
2862 if (err)
2863 goto unlock_rtnl;
2864
2865 if (!drv->ops->deauth) {
2866 err = -EOPNOTSUPP;
2867 goto out;
2868 }
2869
2870 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
2871 err = -EOPNOTSUPP;
2872 goto out;
2873 }
2874
2875 if (!netif_running(dev)) {
2876 err = -ENETDOWN;
2877 goto out;
2878 }
2879
2880 if (!info->attrs[NL80211_ATTR_MAC]) {
2881 err = -EINVAL;
2882 goto out;
2883 }
2884
2885 wiphy = &drv->wiphy;
2886 memset(&req, 0, sizeof(req));
2887
2888 req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
2889
2890 if (info->attrs[NL80211_ATTR_REASON_CODE]) {
2891 req.reason_code =
2892 nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
2893 if (req.reason_code == 0) {
2894 /* Reason Code 0 is reserved */
2895 err = -EINVAL;
2896 goto out;
2897 }
2898 }
2899
2900 if (info->attrs[NL80211_ATTR_IE]) {
2901 req.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
2902 req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
2903 }
2904
2905 err = drv->ops->deauth(&drv->wiphy, dev, &req);
2906
2907out:
2908 cfg80211_put_dev(drv);
2909 dev_put(dev);
2910unlock_rtnl:
2911 rtnl_unlock();
2912 return err;
2913}
2914
2915static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
2916{
2917 struct cfg80211_registered_device *drv;
2918 struct net_device *dev;
2919 struct cfg80211_disassoc_request req;
2920 struct wiphy *wiphy;
2921 int err;
2922
2923 rtnl_lock();
2924
2925 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
2926 if (err)
2927 goto unlock_rtnl;
2928
2929 if (!drv->ops->disassoc) {
2930 err = -EOPNOTSUPP;
2931 goto out;
2932 }
2933
2934 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
2935 err = -EOPNOTSUPP;
2936 goto out;
2937 }
2938
2939 if (!netif_running(dev)) {
2940 err = -ENETDOWN;
2941 goto out;
2942 }
2943
2944 if (!info->attrs[NL80211_ATTR_MAC]) {
2945 err = -EINVAL;
2946 goto out;
2947 }
2948
2949 wiphy = &drv->wiphy;
2950 memset(&req, 0, sizeof(req));
2951
2952 req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
2953
2954 if (info->attrs[NL80211_ATTR_REASON_CODE]) {
2955 req.reason_code =
2956 nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
2957 if (req.reason_code == 0) {
2958 /* Reason Code 0 is reserved */
2959 err = -EINVAL;
2960 goto out;
2961 }
2962 }
2963
2964 if (info->attrs[NL80211_ATTR_IE]) {
2965 req.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
2966 req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
2967 }
2968
2969 err = drv->ops->disassoc(&drv->wiphy, dev, &req);
2970
2971out:
2972 cfg80211_put_dev(drv);
2973 dev_put(dev);
2974unlock_rtnl:
2975 rtnl_unlock();
2976 return err;
2977}
2978
2561static struct genl_ops nl80211_ops[] = { 2979static struct genl_ops nl80211_ops[] = {
2562 { 2980 {
2563 .cmd = NL80211_CMD_GET_WIPHY, 2981 .cmd = NL80211_CMD_GET_WIPHY,
@@ -2725,12 +3143,6 @@ static struct genl_ops nl80211_ops[] = {
2725 .flags = GENL_ADMIN_PERM, 3143 .flags = GENL_ADMIN_PERM,
2726 }, 3144 },
2727 { 3145 {
2728 .cmd = NL80211_CMD_SET_MGMT_EXTRA_IE,
2729 .doit = nl80211_set_mgmt_extra_ie,
2730 .policy = nl80211_policy,
2731 .flags = GENL_ADMIN_PERM,
2732 },
2733 {
2734 .cmd = NL80211_CMD_TRIGGER_SCAN, 3146 .cmd = NL80211_CMD_TRIGGER_SCAN,
2735 .doit = nl80211_trigger_scan, 3147 .doit = nl80211_trigger_scan,
2736 .policy = nl80211_policy, 3148 .policy = nl80211_policy,
@@ -2741,6 +3153,33 @@ static struct genl_ops nl80211_ops[] = {
2741 .policy = nl80211_policy, 3153 .policy = nl80211_policy,
2742 .dumpit = nl80211_dump_scan, 3154 .dumpit = nl80211_dump_scan,
2743 }, 3155 },
3156 {
3157 .cmd = NL80211_CMD_AUTHENTICATE,
3158 .doit = nl80211_authenticate,
3159 .policy = nl80211_policy,
3160 .flags = GENL_ADMIN_PERM,
3161 },
3162 {
3163 .cmd = NL80211_CMD_ASSOCIATE,
3164 .doit = nl80211_associate,
3165 .policy = nl80211_policy,
3166 .flags = GENL_ADMIN_PERM,
3167 },
3168 {
3169 .cmd = NL80211_CMD_DEAUTHENTICATE,
3170 .doit = nl80211_deauthenticate,
3171 .policy = nl80211_policy,
3172 .flags = GENL_ADMIN_PERM,
3173 },
3174 {
3175 .cmd = NL80211_CMD_DISASSOCIATE,
3176 .doit = nl80211_disassociate,
3177 .policy = nl80211_policy,
3178 .flags = GENL_ADMIN_PERM,
3179 },
3180};
3181static struct genl_multicast_group nl80211_mlme_mcgrp = {
3182 .name = "mlme",
2744}; 3183};
2745 3184
2746/* multicast groups */ 3185/* multicast groups */
@@ -2887,6 +3326,71 @@ nla_put_failure:
2887 nlmsg_free(msg); 3326 nlmsg_free(msg);
2888} 3327}
2889 3328
3329static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
3330 struct net_device *netdev,
3331 const u8 *buf, size_t len,
3332 enum nl80211_commands cmd)
3333{
3334 struct sk_buff *msg;
3335 void *hdr;
3336
3337 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
3338 if (!msg)
3339 return;
3340
3341 hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
3342 if (!hdr) {
3343 nlmsg_free(msg);
3344 return;
3345 }
3346
3347 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
3348 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
3349 NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
3350
3351 if (genlmsg_end(msg, hdr) < 0) {
3352 nlmsg_free(msg);
3353 return;
3354 }
3355
3356 genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_KERNEL);
3357 return;
3358
3359 nla_put_failure:
3360 genlmsg_cancel(msg, hdr);
3361 nlmsg_free(msg);
3362}
3363
3364void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
3365 struct net_device *netdev, const u8 *buf, size_t len)
3366{
3367 nl80211_send_mlme_event(rdev, netdev, buf, len,
3368 NL80211_CMD_AUTHENTICATE);
3369}
3370
3371void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
3372 struct net_device *netdev, const u8 *buf,
3373 size_t len)
3374{
3375 nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_ASSOCIATE);
3376}
3377
3378void nl80211_send_rx_deauth(struct cfg80211_registered_device *rdev,
3379 struct net_device *netdev, const u8 *buf,
3380 size_t len)
3381{
3382 nl80211_send_mlme_event(rdev, netdev, buf, len,
3383 NL80211_CMD_DEAUTHENTICATE);
3384}
3385
3386void nl80211_send_rx_disassoc(struct cfg80211_registered_device *rdev,
3387 struct net_device *netdev, const u8 *buf,
3388 size_t len)
3389{
3390 nl80211_send_mlme_event(rdev, netdev, buf, len,
3391 NL80211_CMD_DISASSOCIATE);
3392}
3393
2890/* initialisation/exit functions */ 3394/* initialisation/exit functions */
2891 3395
2892int nl80211_init(void) 3396int nl80211_init(void)
@@ -2915,6 +3419,10 @@ int nl80211_init(void)
2915 if (err) 3419 if (err)
2916 goto err_out; 3420 goto err_out;
2917 3421
3422 err = genl_register_mc_group(&nl80211_fam, &nl80211_mlme_mcgrp);
3423 if (err)
3424 goto err_out;
3425
2918 return 0; 3426 return 0;
2919 err_out: 3427 err_out:
2920 genl_unregister_family(&nl80211_fam); 3428 genl_unregister_family(&nl80211_fam);
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index e65a3c38c52f..b77af4ab80be 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -3,7 +3,6 @@
3 3
4#include "core.h" 4#include "core.h"
5 5
6#ifdef CONFIG_NL80211
7extern int nl80211_init(void); 6extern int nl80211_init(void);
8extern void nl80211_exit(void); 7extern void nl80211_exit(void);
9extern void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev); 8extern void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev);
@@ -12,30 +11,17 @@ extern void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
12extern void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, 11extern void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
13 struct net_device *netdev); 12 struct net_device *netdev);
14extern void nl80211_send_reg_change_event(struct regulatory_request *request); 13extern void nl80211_send_reg_change_event(struct regulatory_request *request);
15#else 14extern void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
16static inline int nl80211_init(void) 15 struct net_device *netdev,
17{ 16 const u8 *buf, size_t len);
18 return 0; 17extern void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
19} 18 struct net_device *netdev,
20static inline void nl80211_exit(void) 19 const u8 *buf, size_t len);
21{ 20extern void nl80211_send_rx_deauth(struct cfg80211_registered_device *rdev,
22} 21 struct net_device *netdev,
23static inline void nl80211_notify_dev_rename( 22 const u8 *buf, size_t len);
24 struct cfg80211_registered_device *rdev) 23extern void nl80211_send_rx_disassoc(struct cfg80211_registered_device *rdev,
25{ 24 struct net_device *netdev,
26} 25 const u8 *buf, size_t len);
27static inline void
28nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
29 struct net_device *netdev)
30{}
31static inline void nl80211_send_scan_aborted(
32 struct cfg80211_registered_device *rdev,
33 struct net_device *netdev)
34{}
35static inline void
36nl80211_send_reg_change_event(struct regulatory_request *request)
37{
38}
39#endif /* CONFIG_NL80211 */
40 26
41#endif /* __NET_WIRELESS_NL80211_H */ 27#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index eb8b8ed16155..6327e1617acb 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -122,9 +122,14 @@ static const struct ieee80211_regdomain *cfg80211_world_regdom =
122 122
123#ifdef CONFIG_WIRELESS_OLD_REGULATORY 123#ifdef CONFIG_WIRELESS_OLD_REGULATORY
124static char *ieee80211_regdom = "US"; 124static char *ieee80211_regdom = "US";
125#else
126static char *ieee80211_regdom = "00";
127#endif
128
125module_param(ieee80211_regdom, charp, 0444); 129module_param(ieee80211_regdom, charp, 0444);
126MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); 130MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
127 131
132#ifdef CONFIG_WIRELESS_OLD_REGULATORY
128/* 133/*
129 * We assume 40 MHz bandwidth for the old regulatory work. 134 * We assume 40 MHz bandwidth for the old regulatory work.
 130 * We emphasize that we are using the exact same frequencies 135 * We emphasize that we are using the exact same frequencies
@@ -1415,16 +1420,6 @@ new_request:
1415 return r; 1420 return r;
1416 } 1421 }
1417 1422
1418 /*
1419 * Note: When CONFIG_WIRELESS_OLD_REGULATORY is enabled
1420 * AND if CRDA is NOT present nothing will happen, if someone
1421 * wants to bother with 11d with OLD_REG you can add a timer.
1422 * If after x amount of time nothing happens you can call:
1423 *
1424 * return set_regdom(country_ie_regdomain);
1425 *
1426 * to intersect with the static rd
1427 */
1428 return call_crda(last_request->alpha2); 1423 return call_crda(last_request->alpha2);
1429} 1424}
1430 1425
@@ -1601,6 +1596,10 @@ static bool reg_same_country_ie_hint(struct wiphy *wiphy,
1601 1596
1602 assert_cfg80211_lock(); 1597 assert_cfg80211_lock();
1603 1598
1599 if (unlikely(last_request->initiator !=
1600 NL80211_REGDOM_SET_BY_COUNTRY_IE))
1601 return false;
1602
1604 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); 1603 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
1605 1604
1606 if (!request_wiphy) 1605 if (!request_wiphy)
@@ -1663,7 +1662,9 @@ void regulatory_hint_11d(struct wiphy *wiphy,
1663 * we optimize an early check to exit out early if we don't have to 1662 * we optimize an early check to exit out early if we don't have to
1664 * do anything 1663 * do anything
1665 */ 1664 */
1666 if (likely(wiphy_idx_valid(last_request->wiphy_idx))) { 1665 if (likely(last_request->initiator ==
1666 NL80211_REGDOM_SET_BY_COUNTRY_IE &&
1667 wiphy_idx_valid(last_request->wiphy_idx))) {
1667 struct cfg80211_registered_device *drv_last_ie; 1668 struct cfg80211_registered_device *drv_last_ie;
1668 1669
1669 drv_last_ie = 1670 drv_last_ie =
@@ -2022,28 +2023,21 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2022 */ 2023 */
2023 2024
2024 BUG_ON(!country_ie_regdomain); 2025 BUG_ON(!country_ie_regdomain);
2026 BUG_ON(rd == country_ie_regdomain);
2025 2027
2026 if (rd != country_ie_regdomain) { 2028 /*
 2027 /* 2029 * Intersect what CRDA returned and what we
 2028 * Intersect what CRDA returned and what we 2030 * had built from the Country IE received
2029 * had built from the Country IE received 2031 */
2030 */
2031 2032
2032 intersected_rd = regdom_intersect(rd, country_ie_regdomain); 2033 intersected_rd = regdom_intersect(rd, country_ie_regdomain);
2033 2034
2034 reg_country_ie_process_debug(rd, country_ie_regdomain, 2035 reg_country_ie_process_debug(rd,
2035 intersected_rd); 2036 country_ie_regdomain,
2037 intersected_rd);
2036 2038
2037 kfree(country_ie_regdomain); 2039 kfree(country_ie_regdomain);
2038 country_ie_regdomain = NULL; 2040 country_ie_regdomain = NULL;
2039 } else {
2040 /*
2041 * This would happen when CRDA was not present and
2042 * OLD_REGULATORY was enabled. We intersect our Country
2043 * IE rd and what was set on cfg80211 originally
2044 */
2045 intersected_rd = regdom_intersect(rd, cfg80211_regdomain);
2046 }
2047 2041
2048 if (!intersected_rd) 2042 if (!intersected_rd)
2049 return -EINVAL; 2043 return -EINVAL;
@@ -2135,15 +2129,18 @@ int regulatory_init(void)
2135 /* 2129 /*
2136 * The old code still requests for a new regdomain and if 2130 * The old code still requests for a new regdomain and if
2137 * you have CRDA you get it updated, otherwise you get 2131 * you have CRDA you get it updated, otherwise you get
2138 * stuck with the static values. We ignore "EU" code as 2132 * stuck with the static values. Since "EU" is not a valid
 2139 * that is not a valid ISO / IEC 3166 alpha2 2133 * ISO / IEC 3166 alpha2 code we can't expect userspace to
 2134 * give us a regulatory domain for it. We need last_request
 2135 * initialized though so let's just send a request which we
2136 * know will be ignored... this crap will be removed once
2137 * OLD_REG dies.
2140 */ 2138 */
2141 if (ieee80211_regdom[0] != 'E' || ieee80211_regdom[1] != 'U') 2139 err = regulatory_hint_core(ieee80211_regdom);
2142 err = regulatory_hint_core(ieee80211_regdom);
2143#else 2140#else
2144 cfg80211_regdomain = cfg80211_world_regdom; 2141 cfg80211_regdomain = cfg80211_world_regdom;
2145 2142
2146 err = regulatory_hint_core("00"); 2143 err = regulatory_hint_core(ieee80211_regdom);
2147#endif 2144#endif
2148 if (err) { 2145 if (err) {
2149 if (err == -ENOMEM) 2146 if (err == -ENOMEM)
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 280dbcd02c15..2a00e362f5fe 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -80,7 +80,8 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
80 bool expired = false; 80 bool expired = false;
81 81
82 list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) { 82 list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) {
83 if (!time_after(jiffies, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE)) 83 if (bss->hold ||
84 !time_after(jiffies, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE))
84 continue; 85 continue;
85 list_del(&bss->list); 86 list_del(&bss->list);
86 rb_erase(&bss->rbn, &dev->bss_tree); 87 rb_erase(&bss->rbn, &dev->bss_tree);
@@ -471,6 +472,30 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
471} 472}
472EXPORT_SYMBOL(cfg80211_unlink_bss); 473EXPORT_SYMBOL(cfg80211_unlink_bss);
473 474
475void cfg80211_hold_bss(struct cfg80211_bss *pub)
476{
477 struct cfg80211_internal_bss *bss;
478
479 if (!pub)
480 return;
481
482 bss = container_of(pub, struct cfg80211_internal_bss, pub);
483 bss->hold = true;
484}
485EXPORT_SYMBOL(cfg80211_hold_bss);
486
487void cfg80211_unhold_bss(struct cfg80211_bss *pub)
488{
489 struct cfg80211_internal_bss *bss;
490
491 if (!pub)
492 return;
493
494 bss = container_of(pub, struct cfg80211_internal_bss, pub);
495 bss->hold = false;
496}
497EXPORT_SYMBOL(cfg80211_unhold_bss);
498
474#ifdef CONFIG_WIRELESS_EXT 499#ifdef CONFIG_WIRELESS_EXT
475int cfg80211_wext_siwscan(struct net_device *dev, 500int cfg80211_wext_siwscan(struct net_device *dev,
476 struct iw_request_info *info, 501 struct iw_request_info *info,
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index b84a9b4fe96a..0fd1db6e95bb 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -66,6 +66,7 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
66 struct cfg80211_registered_device *rdev; 66 struct cfg80211_registered_device *rdev;
67 struct vif_params vifparams; 67 struct vif_params vifparams;
68 enum nl80211_iftype type; 68 enum nl80211_iftype type;
69 int ret;
69 70
70 if (!wdev) 71 if (!wdev)
71 return -EOPNOTSUPP; 72 return -EOPNOTSUPP;
@@ -96,10 +97,16 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
96 return -EINVAL; 97 return -EINVAL;
97 } 98 }
98 99
100 if (type == wdev->iftype)
101 return 0;
102
99 memset(&vifparams, 0, sizeof(vifparams)); 103 memset(&vifparams, 0, sizeof(vifparams));
100 104
101 return rdev->ops->change_virtual_intf(wdev->wiphy, dev->ifindex, type, 105 ret = rdev->ops->change_virtual_intf(wdev->wiphy, dev->ifindex, type,
102 NULL, &vifparams); 106 NULL, &vifparams);
107 WARN_ON(!ret && wdev->iftype != type);
108
109 return ret;
103} 110}
104EXPORT_SYMBOL(cfg80211_wext_siwmode); 111EXPORT_SYMBOL(cfg80211_wext_siwmode);
105 112
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 9ca17b1ce52e..ed80af8ca5fb 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1035,6 +1035,12 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
1035 sx25.sx25_addr = x25->dest_addr; 1035 sx25.sx25_addr = x25->dest_addr;
1036 } 1036 }
1037 1037
1038 /* Sanity check the packet size */
1039 if (len > 65535) {
1040 rc = -EMSGSIZE;
1041 goto out;
1042 }
1043
1038 SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n"); 1044 SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");
1039 1045
1040 /* Build a packet */ 1046 /* Build a packet */
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 62a5425cc6aa..82271720d970 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1615,7 +1615,7 @@ void xfrm_state_walk_done(struct xfrm_state_walk *walk)
1615 1615
1616 spin_lock_bh(&xfrm_state_lock); 1616 spin_lock_bh(&xfrm_state_lock);
1617 list_del(&walk->all); 1617 list_del(&walk->all);
1618 spin_lock_bh(&xfrm_state_lock); 1618 spin_unlock_bh(&xfrm_state_lock);
1619} 1619}
1620EXPORT_SYMBOL(xfrm_state_walk_done); 1620EXPORT_SYMBOL(xfrm_state_walk_done);
1621 1621