-rw-r--r--  Documentation/s390/s390dbf.txt | 21
-rw-r--r--  arch/alpha/kernel/Makefile | 2
-rw-r--r--  arch/alpha/kernel/alpha_ksyms.c | 9
-rw-r--r--  arch/alpha/kernel/semaphore.c | 224
-rw-r--r--  arch/arm/kernel/Makefile | 2
-rw-r--r--  arch/arm/kernel/semaphore.c | 221
-rw-r--r--  arch/avr32/kernel/Makefile | 2
-rw-r--r--  arch/avr32/kernel/semaphore.c | 148
-rw-r--r--  arch/blackfin/Kconfig | 4
-rw-r--r--  arch/blackfin/kernel/bfin_ksyms.c | 5
-rw-r--r--  arch/cris/kernel/Makefile | 3
-rw-r--r--  arch/cris/kernel/crisksyms.c | 7
-rw-r--r--  arch/cris/kernel/semaphore.c | 129
-rw-r--r--  arch/frv/kernel/Makefile | 2
-rw-r--r--  arch/frv/kernel/frv_ksyms.c | 1
-rw-r--r--  arch/frv/kernel/semaphore.c | 155
-rw-r--r--  arch/h8300/kernel/Makefile | 2
-rw-r--r--  arch/h8300/kernel/h8300_ksyms.c | 1
-rw-r--r--  arch/h8300/kernel/semaphore.c | 132
-rw-r--r--  arch/ia64/kernel/Makefile | 2
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c | 6
-rw-r--r--  arch/ia64/kernel/semaphore.c | 165
-rw-r--r--  arch/m32r/kernel/Makefile | 2
-rw-r--r--  arch/m32r/kernel/m32r_ksyms.c | 5
-rw-r--r--  arch/m32r/kernel/semaphore.c | 185
-rw-r--r--  arch/m68k/kernel/Makefile | 2
-rw-r--r--  arch/m68k/kernel/m68k_ksyms.c | 6
-rw-r--r--  arch/m68k/kernel/semaphore.c | 132
-rw-r--r--  arch/m68k/lib/Makefile | 2
-rw-r--r--  arch/m68k/lib/semaphore.S | 53
-rw-r--r--  arch/m68knommu/kernel/Makefile | 2
-rw-r--r--  arch/m68knommu/kernel/m68k_ksyms.c | 6
-rw-r--r--  arch/m68knommu/kernel/semaphore.c | 133
-rw-r--r--  arch/m68knommu/lib/Makefile | 2
-rw-r--r--  arch/m68knommu/lib/semaphore.S | 66
-rw-r--r--  arch/mips/kernel/Makefile | 2
-rw-r--r--  arch/mips/kernel/semaphore.c | 168
-rw-r--r--  arch/mn10300/kernel/Makefile | 2
-rw-r--r--  arch/mn10300/kernel/semaphore.c | 149
-rw-r--r--  arch/parisc/kernel/Makefile | 2
-rw-r--r--  arch/parisc/kernel/parisc_ksyms.c | 5
-rw-r--r--  arch/parisc/kernel/semaphore.c | 102
-rw-r--r--  arch/powerpc/kernel/Makefile | 2
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 1
-rw-r--r--  arch/powerpc/kernel/semaphore.c | 135
-rw-r--r--  arch/ppc/kernel/semaphore.c | 131
-rw-r--r--  arch/s390/Kconfig | 33
-rw-r--r--  arch/s390/crypto/aes_s390.c | 8
-rw-r--r--  arch/s390/crypto/des_s390.c | 8
-rw-r--r--  arch/s390/crypto/sha1_s390.c | 8
-rw-r--r--  arch/s390/crypto/sha256_s390.c | 8
-rw-r--r--  arch/s390/defconfig | 1
-rw-r--r--  arch/s390/kernel/Makefile | 4
-rw-r--r--  arch/s390/kernel/compat_linux.h | 73
-rw-r--r--  arch/s390/kernel/compat_signal.c | 11
-rw-r--r--  arch/s390/kernel/debug.c | 53
-rw-r--r--  arch/s390/kernel/early.c | 1
-rw-r--r--  arch/s390/kernel/entry.h | 60
-rw-r--r--  arch/s390/kernel/entry64.S | 2
-rw-r--r--  arch/s390/kernel/ipl.c | 2
-rw-r--r--  arch/s390/kernel/kprobes.c | 2
-rw-r--r--  arch/s390/kernel/process.c | 77
-rw-r--r--  arch/s390/kernel/ptrace.c | 1
-rw-r--r--  arch/s390/kernel/s390_ext.c | 14
-rw-r--r--  arch/s390/kernel/s390_ksyms.c | 7
-rw-r--r--  arch/s390/kernel/semaphore.c | 108
-rw-r--r--  arch/s390/kernel/setup.c | 15
-rw-r--r--  arch/s390/kernel/signal.c | 16
-rw-r--r--  arch/s390/kernel/smp.c | 91
-rw-r--r--  arch/s390/kernel/sys_s390.c | 2
-rw-r--r--  arch/s390/kernel/time.c | 259
-rw-r--r--  arch/s390/kernel/topology.c | 314
-rw-r--r--  arch/s390/kernel/traps.c | 17
-rw-r--r--  arch/s390/lib/delay.c | 14
-rw-r--r--  arch/s390/lib/uaccess_pt.c | 59
-rw-r--r--  arch/s390/mm/extmem.c | 67
-rw-r--r--  arch/s390/mm/fault.c | 21
-rw-r--r--  arch/s390/mm/init.c | 1
-rw-r--r--  arch/sh/kernel/Makefile_32 | 2
-rw-r--r--  arch/sh/kernel/Makefile_64 | 2
-rw-r--r--  arch/sh/kernel/semaphore.c | 139
-rw-r--r--  arch/sh/kernel/sh_ksyms_32.c | 7
-rw-r--r--  arch/sh/kernel/sh_ksyms_64.c | 4
-rw-r--r--  arch/sparc/kernel/Makefile | 2
-rw-r--r--  arch/sparc/kernel/semaphore.c | 155
-rw-r--r--  arch/sparc/kernel/sparc_ksyms.c | 5
-rw-r--r--  arch/sparc64/kernel/Makefile | 2
-rw-r--r--  arch/sparc64/kernel/semaphore.c | 254
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c | 6
-rw-r--r--  arch/um/Kconfig.i386 | 4
-rw-r--r--  arch/um/Kconfig.x86_64 | 4
-rw-r--r--  arch/um/sys-i386/ksyms.c | 12
-rw-r--r--  arch/um/sys-ppc/Makefile | 8
-rw-r--r--  arch/um/sys-x86_64/ksyms.c | 13
-rw-r--r--  arch/v850/kernel/Makefile | 2
-rw-r--r--  arch/v850/kernel/semaphore.c | 166
-rw-r--r--  arch/v850/kernel/v850_ksyms.c | 7
-rw-r--r--  arch/x86/Kconfig | 3
-rw-r--r--  arch/x86/kernel/i386_ksyms_32.c | 5
-rw-r--r--  arch/x86/kernel/x8664_ksyms_64.c | 6
-rw-r--r--  arch/x86/lib/semaphore_32.S | 83
-rw-r--r--  arch/x86/lib/thunk_64.S | 5
-rw-r--r--  arch/xtensa/kernel/Makefile | 2
-rw-r--r--  arch/xtensa/kernel/semaphore.c | 226
-rw-r--r--  arch/xtensa/kernel/xtensa_ksyms.c | 9
-rw-r--r--  drivers/acpi/osl.c | 89
-rw-r--r--  drivers/char/hw_random/core.c | 4
-rw-r--r--  drivers/crypto/Kconfig | 1
-rw-r--r--  drivers/infiniband/core/cm.c | 63
-rw-r--r--  drivers/infiniband/core/cma.c | 2
-rw-r--r--  drivers/infiniband/core/fmr_pool.c | 3
-rw-r--r--  drivers/infiniband/core/ucma.c | 2
-rw-r--r--  drivers/infiniband/core/uverbs.h | 4
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 14
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 28
-rw-r--r--  drivers/infiniband/core/verbs.c | 14
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.c | 80
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.h | 16
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_ae.c | 10
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_alloc.c | 12
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_cq.c | 4
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_intr.c | 6
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_mm.c | 2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_mq.c | 4
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_mq.h | 2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.c | 85
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_qp.c | 30
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_rnic.c | 31
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_vq.c | 2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_wr.h | 212
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_dbg.c | 24
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c | 84
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_resource.c | 12
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.c | 6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.h | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c | 166
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.h | 4
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cq.c | 4
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_ev.c | 12
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_mem.c | 6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 79
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.h | 4
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c | 42
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_av.c | 31
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h | 2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c | 19
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_hca.c | 129
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c | 19
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mrmw.c | 42
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_pd.c | 11
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c | 51
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c | 2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_tools.h | 16
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_uverbs.c | 19
-rw-r--r--  drivers/infiniband/hw/ipath/Makefile | 3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_7220.h | 57
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_common.h | 54
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_debug.h | 2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_diag.c | 35
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c | 1041
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_eeprom.c | 428
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c | 176
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6110.c | 51
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6120.c | 203
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba7220.c | 2571
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_init_chip.c | 312
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c | 656
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_kernel.h | 304
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mad.c | 110
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c | 59
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c | 67
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_registers.h | 168
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c | 22
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sd7220.c | 1462
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sd7220_img.c | 1082
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sdma.c | 790
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_srq.c | 5
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_stats.c | 33
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sysfs.c | 104
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_uc.c | 8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c | 7
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_sdma.c | 879
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_sdma.h | 54
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c | 413
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h | 32
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 319
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 25
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 15
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 117
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c | 6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.h | 1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c | 14
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h | 14
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c | 4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c | 15
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c | 6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_profile.c | 4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_profile.h | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 28
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_wqe.h | 18
-rw-r--r--  drivers/infiniband/hw/nes/nes.c | 15
-rw-r--r--  drivers/infiniband/hw/nes/nes.h | 32
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 131
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.h | 35
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 49
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c | 26
-rw-r--r--  drivers/infiniband/hw/nes/nes_utils.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 29
-rw-r--r--  drivers/infiniband/ulp/ipoib/Makefile | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 10
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 15
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 99
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 126
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 33
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 3
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 5
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 7
-rw-r--r--  drivers/net/mlx4/catas.c | 2
-rw-r--r--  drivers/net/mlx4/cmd.c | 3
-rw-r--r--  drivers/net/mlx4/cq.c | 72
-rw-r--r--  drivers/net/mlx4/eq.c | 5
-rw-r--r--  drivers/net/mlx4/fw.c | 13
-rw-r--r--  drivers/net/mlx4/fw.h | 1
-rw-r--r--  drivers/net/mlx4/intf.c | 8
-rw-r--r--  drivers/net/mlx4/main.c | 6
-rw-r--r--  drivers/net/mlx4/mcg.c | 12
-rw-r--r--  drivers/net/mlx4/mlx4.h | 4
-rw-r--r--  drivers/s390/block/Kconfig | 1
-rw-r--r--  drivers/s390/block/dasd.c | 10
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 34
-rw-r--r--  drivers/s390/block/dasd_alias.c | 49
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 7
-rw-r--r--  drivers/s390/block/dasd_fba.c | 3
-rw-r--r--  drivers/s390/block/dasd_int.h | 6
-rw-r--r--  drivers/s390/block/dcssblk.c | 53
-rw-r--r--  drivers/s390/char/monreader.c | 54
-rw-r--r--  drivers/s390/char/sclp_tty.c | 2
-rw-r--r--  drivers/s390/char/sclp_vt220.c | 13
-rw-r--r--  drivers/s390/char/tape_34xx.c | 2
-rw-r--r--  drivers/s390/char/tape_char.c | 4
-rw-r--r--  drivers/s390/char/tape_class.c | 5
-rw-r--r--  drivers/s390/char/tape_class.h | 2
-rw-r--r--  drivers/s390/char/vmur.c | 24
-rw-r--r--  drivers/s390/char/vmur.h | 4
-rw-r--r--  drivers/s390/char/vmwatchdog.c | 4
-rw-r--r--  drivers/s390/char/zcore.c | 2
-rw-r--r--  drivers/s390/cio/chsc.c | 15
-rw-r--r--  drivers/s390/cio/cio.c | 30
-rw-r--r--  drivers/s390/cio/cio.h | 1
-rw-r--r--  drivers/s390/cio/css.c | 16
-rw-r--r--  drivers/s390/cio/css.h | 1
-rw-r--r--  drivers/s390/cio/device.c | 1
-rw-r--r--  drivers/s390/cio/device.h | 1
-rw-r--r--  drivers/s390/cio/device_ops.c | 9
-rw-r--r--  drivers/s390/cio/device_status.c | 8
-rw-r--r--  drivers/s390/cio/qdio.c | 180
-rw-r--r--  drivers/s390/cio/qdio.h | 28
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 189
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 15
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 181
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h | 16
-rw-r--r--  drivers/s390/crypto/zcrypt_cca_key.h | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_error.h | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.c | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c | 201
-rw-r--r--  drivers/s390/net/claw.c | 344
-rw-r--r--  drivers/s390/net/netiucv.c | 97
-rw-r--r--  drivers/s390/s390mach.c | 8
-rw-r--r--  drivers/s390/s390mach.h | 4
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 2
-rw-r--r--  drivers/s390/sysinfo.c | 116
-rw-r--r--  include/asm-alpha/semaphore.h | 150
-rw-r--r--  include/asm-arm/semaphore-helper.h | 84
-rw-r--r--  include/asm-arm/semaphore.h | 99
-rw-r--r--  include/asm-avr32/semaphore.h | 109
-rw-r--r--  include/asm-blackfin/semaphore-helper.h | 82
-rw-r--r--  include/asm-blackfin/semaphore.h | 106
-rw-r--r--  include/asm-cris/semaphore-helper.h | 78
-rw-r--r--  include/asm-cris/semaphore.h | 134
-rw-r--r--  include/asm-frv/semaphore.h | 156
-rw-r--r--  include/asm-h8300/semaphore-helper.h | 85
-rw-r--r--  include/asm-h8300/semaphore.h | 191
-rw-r--r--  include/asm-ia64/semaphore.h | 100
-rw-r--r--  include/asm-m32r/semaphore.h | 145
-rw-r--r--  include/asm-m68k/semaphore-helper.h | 142
-rw-r--r--  include/asm-m68k/semaphore.h | 164
-rw-r--r--  include/asm-m68knommu/semaphore-helper.h | 82
-rw-r--r--  include/asm-m68knommu/semaphore.h | 154
-rw-r--r--  include/asm-mips/semaphore.h | 109
-rw-r--r--  include/asm-mn10300/semaphore.h | 170
-rw-r--r--  include/asm-parisc/semaphore-helper.h | 89
-rw-r--r--  include/asm-parisc/semaphore.h | 146
-rw-r--r--  include/asm-powerpc/semaphore.h | 95
-rw-r--r--  include/asm-s390/cio.h | 4
-rw-r--r--  include/asm-s390/cpu.h | 8
-rw-r--r--  include/asm-s390/debug.h | 5
-rw-r--r--  include/asm-s390/extmem.h | 11
-rw-r--r--  include/asm-s390/hardirq.h | 2
-rw-r--r--  include/asm-s390/lowcore.h | 11
-rw-r--r--  include/asm-s390/processor.h | 7
-rw-r--r--  include/asm-s390/semaphore.h | 108
-rw-r--r--  include/asm-s390/smp.h | 3
-rw-r--r--  include/asm-s390/sysinfo.h | 116
-rw-r--r--  include/asm-s390/system.h | 2
-rw-r--r--  include/asm-s390/timex.h | 13
-rw-r--r--  include/asm-s390/tlbflush.h | 36
-rw-r--r--  include/asm-s390/topology.h | 23
-rw-r--r--  include/asm-sh/semaphore-helper.h | 89
-rw-r--r--  include/asm-sh/semaphore.h | 116
-rw-r--r--  include/asm-sparc/semaphore.h | 193
-rw-r--r--  include/asm-sparc64/semaphore.h | 54
-rw-r--r--  include/asm-um/semaphore.h | 7
-rw-r--r--  include/asm-v850/semaphore.h | 85
-rw-r--r--  include/asm-x86/semaphore.h | 6
-rw-r--r--  include/asm-x86/semaphore_32.h | 175
-rw-r--r--  include/asm-x86/semaphore_64.h | 180
-rw-r--r--  include/asm-xtensa/semaphore.h | 100
-rw-r--r--  include/linux/interrupt.h | 19
-rw-r--r--  include/linux/irq.h | 10
-rw-r--r--  include/linux/mlx4/cmd.h | 2
-rw-r--r--  include/linux/mlx4/cq.h | 19
-rw-r--r--  include/linux/mlx4/device.h | 1
-rw-r--r--  include/linux/mlx4/driver.h | 3
-rw-r--r--  include/linux/mlx4/qp.h | 15
-rw-r--r--  include/linux/quota.h | 8
-rw-r--r--  include/linux/semaphore.h | 51
-rw-r--r--  include/linux/slub_def.h | 2
-rw-r--r--  include/rdma/ib_user_verbs.h | 5
-rw-r--r--  include/rdma/ib_verbs.h | 35
-rw-r--r--  init/Kconfig | 2
-rw-r--r--  kernel/Makefile | 2
-rw-r--r--  kernel/cgroup.c | 7
-rw-r--r--  kernel/semaphore.c | 264
-rw-r--r--  kernel/signal.c | 71
-rw-r--r--  kernel/time/tick-broadcast.c | 2
-rw-r--r--  kernel/time/tick-common.c | 4
-rw-r--r--  kernel/time/tick-oneshot.c | 2
-rw-r--r--  lib/Kconfig.debug | 12
-rw-r--r--  lib/Makefile | 1
-rw-r--r--  lib/kernel_lock.c | 1
-rw-r--r--  lib/semaphore-sleepers.c | 176
-rw-r--r--  mm/slub.c | 97
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c | 1
-rw-r--r--  security/Kconfig | 10
-rw-r--r--  security/commoncap.c | 4
-rw-r--r--  security/keys/internal.h | 8
-rw-r--r--  security/root_plug.c | 2
-rw-r--r--  security/security.c | 8
-rw-r--r--  security/selinux/Kconfig | 2
-rw-r--r--  security/selinux/Makefile | 1
-rw-r--r--  security/selinux/avc.c | 13
-rw-r--r--  security/selinux/hooks.c | 165
-rw-r--r--  security/selinux/include/av_perm_to_string.h | 5
-rw-r--r--  security/selinux/include/av_permissions.h | 5
-rw-r--r--  security/selinux/include/netlabel.h | 16
-rw-r--r--  security/selinux/include/netport.h | 31
-rw-r--r--  security/selinux/include/objsec.h | 15
-rw-r--r--  security/selinux/include/security.h | 15
-rw-r--r--  security/selinux/netlabel.c | 82
-rw-r--r--  security/selinux/netlink.c | 2
-rw-r--r--  security/selinux/netport.c | 286
-rw-r--r--  security/selinux/selinuxfs.c | 11
-rw-r--r--  security/selinux/ss/avtab.c | 40
-rw-r--r--  security/selinux/ss/conditional.c | 16
-rw-r--r--  security/selinux/ss/ebitmap.c | 14
-rw-r--r--  security/selinux/ss/policydb.c | 65
-rw-r--r--  security/selinux/ss/policydb.h | 2
-rw-r--r--  security/selinux/ss/services.c | 69
372 files changed, 15045 insertions, 11499 deletions
diff --git a/Documentation/s390/s390dbf.txt b/Documentation/s390/s390dbf.txt
index 0eb7c58916de..e05420973698 100644
--- a/Documentation/s390/s390dbf.txt
+++ b/Documentation/s390/s390dbf.txt
@@ -115,6 +115,27 @@ Return Value: Handle for generated debug area
 Description:  Allocates memory for a debug log
               Must not be called within an interrupt handler
 
+----------------------------------------------------------------------------
+debug_info_t *debug_register_mode(char *name, int pages, int nr_areas,
+                                  int buf_size, mode_t mode, uid_t uid,
+                                  gid_t gid);
+
+Parameter:    name:     Name of debug log (e.g. used for debugfs entry)
+              pages:    Number of pages, which will be allocated per area
+              nr_areas: Number of debug areas
+              buf_size: Size of data area in each debug entry
+              mode:     File mode for debugfs files. E.g. S_IRWXUGO
+              uid:      User ID for debugfs files. Currently only 0 is
+                        supported.
+              gid:      Group ID for debugfs files. Currently only 0 is
+                        supported.
+
+Return Value: Handle for generated debug area
+              NULL if register failed
+
+Description:  Allocates memory for a debug log
+              Must not be called within an interrupt handler
+
 ---------------------------------------------------------------------------
 void debug_unregister (debug_info_t * id);
 
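For context, a minimal sketch of how a driver might call the new interface documented above. The driver name "mydrv" and the sizes and mode bits are illustrative assumptions, not taken from the patch; only uid/gid 0 are supported, as the text says:

	/* Hypothetical s390 driver init using debug_register_mode().
	 * Error handling is abbreviated. */
	#include <linux/module.h>
	#include <asm/debug.h>

	static debug_info_t *my_dbf;

	static int __init my_driver_init(void)
	{
		/* 4 pages per area, 1 area, 4 bytes per entry; debugfs
		 * files readable/writable by owner only; uid 0 and gid 0
		 * are the only supported values at this point. */
		my_dbf = debug_register_mode("mydrv", 4, 1, 4,
					     S_IRUSR | S_IWUSR, 0, 0);
		if (!my_dbf)
			return -ENOMEM;
		debug_register_view(my_dbf, &debug_hex_ascii_view);
		return 0;
	}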
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index dccf05245d4d..ac706c1d7ada 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -7,7 +7,7 @@ EXTRA_AFLAGS := $(KBUILD_CFLAGS)
 EXTRA_CFLAGS := -Werror -Wno-sign-compare
 
 obj-y    := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
-	    irq_alpha.o signal.o setup.o ptrace.o time.o semaphore.o \
+	    irq_alpha.o signal.o setup.o ptrace.o time.o \
 	    alpha_ksyms.o systbls.o err_common.o io.o
 
 obj-$(CONFIG_VGA_HOSE) += console.o
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index e9762a33b043..d96e742d4dc2 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -77,15 +77,6 @@ EXPORT_SYMBOL(__do_clear_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(__strnlen_user);
 
-/* Semaphore helper functions. */
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__up_wakeup);
-EXPORT_SYMBOL(down);
-EXPORT_SYMBOL(down_interruptible);
-EXPORT_SYMBOL(down_trylock);
-EXPORT_SYMBOL(up);
-
 /*
  * SMP-specific symbols.
  */
diff --git a/arch/alpha/kernel/semaphore.c b/arch/alpha/kernel/semaphore.c
deleted file mode 100644
index 8d2982aa1b8d..000000000000
--- a/arch/alpha/kernel/semaphore.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Alpha semaphore implementation.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999, 2000 Richard Henderson
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-
-/*
- * This is basically the PPC semaphore scheme ported to use
- * the Alpha ll/sc sequences, so see the PPC code for
- * credits.
- */
-
-/*
- * Atomically update sem->count.
- * This does the equivalent of the following:
- *
- *	old_count = sem->count;
- *	tmp = MAX(old_count, 0) + incr;
- *	sem->count = tmp;
- *	return old_count;
- */
-static inline int __sem_update_count(struct semaphore *sem, int incr)
-{
-	long old_count, tmp = 0;
-
-	__asm__ __volatile__(
-	"1:	ldl_l	%0,%2\n"
-	"	cmovgt	%0,%0,%1\n"
-	"	addl	%1,%3,%1\n"
-	"	stl_c	%1,%2\n"
-	"	beq	%1,2f\n"
-	"	mb\n"
-	".subsection 2\n"
-	"2:	br	1b\n"
-	".previous"
-	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
-	: "Ir" (incr), "1" (tmp), "m" (sem->count));
-
-	return old_count;
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- */
-
-void __sched
-__down_failed(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down failed(%p)\n",
-	       tsk->comm, task_pid_nr(tsk), sem);
-#endif
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	wmb();
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	/*
-	 * Try to get the semaphore.  If the count is > 0, then we've
-	 * got the semaphore; we decrement count and exit the loop.
-	 * If the count is 0 or negative, we set it to -1, indicating
-	 * that we are asleep, and then sleep.
-	 */
-	while (__sem_update_count(sem, -1) <= 0) {
-		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-
-	/*
-	 * If there are any more sleepers, wake one of them up so
-	 * that it can either get the semaphore, or set count to -1
-	 * indicating that there are still processes sleeping.
-	 */
-	wake_up(&sem->wait);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down acquired(%p)\n",
-	       tsk->comm, task_pid_nr(tsk), sem);
-#endif
-}
-
-int __sched
-__down_failed_interruptible(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	long ret = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down failed(%p)\n",
-	       tsk->comm, task_pid_nr(tsk), sem);
-#endif
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	wmb();
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	while (__sem_update_count(sem, -1) <= 0) {
-		if (signal_pending(current)) {
-			/*
-			 * A signal is pending - give up trying.
-			 * Set sem->count to 0 if it is negative,
-			 * since we are no longer sleeping.
-			 */
-			__sem_update_count(sem, 0);
-			ret = -EINTR;
-			break;
-		}
-		schedule();
-		set_task_state(tsk, TASK_INTERRUPTIBLE);
-	}
-
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down %s(%p)\n",
-	       current->comm, task_pid_nr(current),
-	       (ret < 0 ? "interrupted" : "acquired"), sem);
-#endif
-	return ret;
-}
-
-void
-__up_wakeup(struct semaphore *sem)
-{
-	/*
-	 * Note that we incremented count in up() before we came here,
-	 * but that was ineffective since the result was <= 0, and
-	 * any negative value of count is equivalent to 0.
-	 * This ends up setting count to 1, unless count is now > 0
-	 * (i.e. because some other cpu has called up() in the meantime),
-	 * in which case we just increment count.
-	 */
-	__sem_update_count(sem, 1);
-	wake_up(&sem->wait);
-}
-
-void __sched
-down(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down(%p) <count=%d> from %p\n",
-	       current->comm, task_pid_nr(current), sem,
-	       atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-	__down(sem);
-}
-
-int __sched
-down_interruptible(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down(%p) <count=%d> from %p\n",
-	       current->comm, task_pid_nr(current), sem,
-	       atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-	return __down_interruptible(sem);
-}
-
-int
-down_trylock(struct semaphore *sem)
-{
-	int ret;
-
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	ret = __down_trylock(sem);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down_trylock %s from %p\n",
-	       current->comm, task_pid_nr(current),
-	       ret ? "failed" : "acquired",
-	       __builtin_return_address(0));
-#endif
-
-	return ret;
-}
-
-void
-up(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): up(%p) <count=%d> from %p\n",
-	       current->comm, task_pid_nr(current), sem,
-	       atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-	__up(sem);
-}
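This and every per-arch semaphore.c below are superseded by the generic kernel/semaphore.c that the diffstat shows being added (264 lines). A minimal sketch of the scheme that replaces all the arch-specific count tricks, simplified from the added code (the real version also has interruptible, killable, trylock and timeout variants built on the same slow paths):

	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/sched.h>

	/* One spinlock-protected counter plus a FIFO of waiters. */
	struct semaphore {
		spinlock_t		lock;
		unsigned int		count;
		struct list_head	wait_list;
	};

	struct semaphore_waiter {
		struct list_head	list;
		struct task_struct	*task;
		int			up;
	};

	static noinline void __down(struct semaphore *sem)
	{
		struct semaphore_waiter waiter;

		/* queue in FIFO order and sleep until __up() hands us
		 * the token; called with sem->lock held and irqs off */
		list_add_tail(&waiter.list, &sem->wait_list);
		waiter.task = current;
		waiter.up = 0;
		for (;;) {
			__set_task_state(current, TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&sem->lock);
			schedule();
			spin_lock_irq(&sem->lock);
			if (waiter.up)
				return;
		}
	}

	static noinline void __up(struct semaphore *sem)
	{
		struct semaphore_waiter *waiter =
			list_first_entry(&sem->wait_list,
					 struct semaphore_waiter, list);
		list_del(&waiter->list);
		waiter->up = 1;		/* pass the token directly */
		wake_up_process(waiter->task);
	}

	void down(struct semaphore *sem)
	{
		unsigned long flags;

		spin_lock_irqsave(&sem->lock, flags);
		if (likely(sem->count > 0))
			sem->count--;	/* fast path: take a token */
		else
			__down(sem);	/* slow path: queue and sleep */
		spin_unlock_irqrestore(&sem->lock, flags);
	}

	void up(struct semaphore *sem)
	{
		unsigned long flags;

		spin_lock_irqsave(&sem->lock, flags);
		if (likely(list_empty(&sem->wait_list)))
			sem->count++;	/* nobody waiting: return token */
		else
			__up(sem);	/* hand token to first waiter */
		spin_unlock_irqrestore(&sem->lock, flags);
	}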
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 00d44c6fbfe9..6235f72a14f0 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -7,7 +7,7 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
 # Object file lists.
 
 obj-y		:= compat.o entry-armv.o entry-common.o irq.o \
-		   process.o ptrace.o semaphore.o setup.o signal.o \
+		   process.o ptrace.o setup.o signal.o \
 		   sys_arm.o stacktrace.o time.o traps.o
 
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
diff --git a/arch/arm/kernel/semaphore.c b/arch/arm/kernel/semaphore.c
deleted file mode 100644
index 981fe5c6ccbe..000000000000
--- a/arch/arm/kernel/semaphore.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * ARM semaphore implementation, taken from
- *
- * i386 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Modified for ARM by Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-static DEFINE_SPINLOCK(semaphore_lock);
-
-void __sched __down(struct semaphore * sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_UNINTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers ++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we* can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock. The
-		 * "-1" is because we're still hoping to get
-		 * the lock.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_INTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore * sem)
-{
-	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count))
-		wake_up(&sem->wait);
-
-	spin_unlock_irqrestore(&semaphore_lock, flags);
-	return 1;
-}
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * ip contains the semaphore pointer on entry. Save the C-clobbered
- * registers (r0 to r3 and lr), but not ip, as we use it as a return
- * value in some cases..
- * To remain AAPCS compliant (64-bit stack align) we save r4 as well.
- */
-asm("	.section .sched.text,\"ax\",%progbits	\n\
-	.align	5				\n\
-	.globl	__down_failed			\n\
-__down_failed:					\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__down				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-						\n\
-	.align	5				\n\
-	.globl	__down_interruptible_failed	\n\
-__down_interruptible_failed:			\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__down_interruptible		\n\
-	mov	ip, r0				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-						\n\
-	.align	5				\n\
-	.globl	__down_trylock_failed		\n\
-__down_trylock_failed:				\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__down_trylock			\n\
-	mov	ip, r0				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-						\n\
-	.align	5				\n\
-	.globl	__up_wakeup			\n\
-__up_wakeup:					\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__up				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-	");
-
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_interruptible_failed);
-EXPORT_SYMBOL(__down_trylock_failed);
-EXPORT_SYMBOL(__up_wakeup);
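The subtle part of this i386-derived scheme is the "sleepers" bookkeeping: every contender has already decremented count in the inline fast path, and the loop folds all but one of those decrements back in before testing for a token. A user-space model of just the arithmetic (illustrative only; the kernel does this under a spinlock with atomic_add_negative(), and there is no real scheduling here):

	#include <assert.h>

	static int count;	/* semaphore count after the inline decrement */
	static int sleepers;	/* contenders whose decrement is still folded in */

	static int contend_once(void)
	{
		int s = sleepers;

		count += s - 1;		/* fold everybody else's decrement back in */
		if (count >= 0) {
			sleepers = 0;	/* token acquired */
			return 1;
		}
		sleepers = 1;		/* us - see -1 above; sleep again */
		return 0;
	}

	int main(void)
	{
		/* Semaphore initialized to 1; the holder's down() made count 0,
		 * and our own inline down() decrement made it -1. */
		count = -1;
		sleepers++;			/* on entering __down() */
		assert(contend_once() == 0);	/* still held: we sleep */
		count += 1;			/* holder's up(): wakes us */
		assert(contend_once() == 1);	/* now the token is ours */
		assert(count == 0 && sleepers == 0);
		return 0;
	}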
diff --git a/arch/avr32/kernel/Makefile b/arch/avr32/kernel/Makefile
index e4b6d122b033..18229d0d1861 100644
--- a/arch/avr32/kernel/Makefile
+++ b/arch/avr32/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o vmlinux.lds
 
 obj-$(CONFIG_SUBARCH_AVR32B)	+= entry-avr32b.o
 obj-y				+= syscall_table.o syscall-stubs.o irq.o
-obj-y				+= setup.o traps.o semaphore.o ocd.o ptrace.o
+obj-y				+= setup.o traps.o ocd.o ptrace.o
 obj-y				+= signal.o sys_avr32.o process.o time.o
 obj-y				+= init_task.o switch_to.o cpu.o
 obj-$(CONFIG_MODULES)		+= module.o avr32_ksyms.o
diff --git a/arch/avr32/kernel/semaphore.c b/arch/avr32/kernel/semaphore.c
deleted file mode 100644
index 1e2705a05016..000000000000
--- a/arch/avr32/kernel/semaphore.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * AVR32 sempahore implementation.
- *
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * Based on linux/arch/i386/kernel/semaphore.c
- *	Copyright (C) 1999 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-
-#include <asm/semaphore.h>
-#include <asm/atomic.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-EXPORT_SYMBOL(__up);
-
-void __sched __down(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	tsk->state = TASK_RUNNING;
-}
-EXPORT_SYMBOL(__down);
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into the trylock
-		 * failure case - we won't be sleeping, and we can't
-		 * get the lock as it has contention. Just correct the
-		 * count and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-	tsk->state = TASK_RUNNING;
-	return retval;
-}
-EXPORT_SYMBOL(__down_interruptible);
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 589c6aca4803..2dd1f300a5cf 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -31,10 +31,6 @@ config ZONE_DMA
 	bool
 	default y
 
-config SEMAPHORE_SLEEPERS
-	bool
-	default y
-
 config GENERIC_FIND_NEXT_BIT
 	bool
 	default y
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index 0bfbb269e350..053edff6c0d8 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -42,11 +42,6 @@ EXPORT_SYMBOL(ip_fast_csum);
 
 EXPORT_SYMBOL(kernel_thread);
 
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__down_interruptible);
-
 EXPORT_SYMBOL(is_in_rom);
 EXPORT_SYMBOL(bfin_return_from_exception);
 
diff --git a/arch/cris/kernel/Makefile b/arch/cris/kernel/Makefile
index c8e8ea570989..ee7bcd4d20b2 100644
--- a/arch/cris/kernel/Makefile
+++ b/arch/cris/kernel/Makefile
@@ -5,8 +5,7 @@
 
 extra-y	:= vmlinux.lds
 
-obj-y   := process.o traps.o irq.o ptrace.o setup.o \
-	   time.o sys_cris.o semaphore.o
+obj-y   := process.o traps.o irq.o ptrace.o setup.o time.o sys_cris.o
 
 obj-$(CONFIG_MODULES)    += crisksyms.o
 obj-$(CONFIG_MODULES)    += module.o
diff --git a/arch/cris/kernel/crisksyms.c b/arch/cris/kernel/crisksyms.c
index 62f0e752915a..7ac000f6a888 100644
--- a/arch/cris/kernel/crisksyms.c
+++ b/arch/cris/kernel/crisksyms.c
@@ -9,7 +9,6 @@
 #include <linux/string.h>
 #include <linux/tty.h>
 
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -49,12 +48,6 @@ EXPORT_SYMBOL(__negdi2);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 
-/* Semaphore functions */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-
 /* Userspace access functions */
 EXPORT_SYMBOL(__copy_user_zeroing);
 EXPORT_SYMBOL(__copy_user);
diff --git a/arch/cris/kernel/semaphore.c b/arch/cris/kernel/semaphore.c
deleted file mode 100644
index f137a439041f..000000000000
--- a/arch/cris/kernel/semaphore.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/sched.h>
-#include <asm/semaphore-helper.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit. ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore. The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function. Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible. This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return. If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-#define DOWN_VAR \
-	struct task_struct *tsk = current; \
-	wait_queue_t wait; \
-	init_waitqueue_entry(&wait, tsk);
-
-#define DOWN_HEAD(task_state) \
- \
- \
-	tsk->state = (task_state); \
-	add_wait_queue(&sem->wait, &wait); \
- \
-	/* \
-	 * Ok, we're set up. sem->count is known to be less than zero \
-	 * so we must wait. \
-	 * \
-	 * We can let go the lock for purposes of waiting. \
-	 * We re-acquire it after awaking so as to protect \
-	 * all semaphore operations. \
-	 * \
-	 * If "up()" is called before we call waking_non_zero() then \
-	 * we will catch it right away. If it is called later then \
-	 * we will have to go through a wakeup cycle to catch it. \
-	 * \
-	 * Multiple waiters contend for the semaphore lock to see \
-	 * who gets to gate through and who has to wait some more. \
-	 */ \
-	for (;;) {
-
-#define DOWN_TAIL(task_state) \
-		tsk->state = (task_state); \
-	} \
-	tsk->state = TASK_RUNNING; \
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DOWN_VAR
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-	DOWN_VAR
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, tsk);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
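The helper contract referenced above lives in <asm/semaphore-helper.h>: up() deposits a wakeup credit via wake_one_more(), and exactly one of the woken tasks consumes it in waking_non_zero() before gating through. A sketch of the spinlock-protected form, loosely modeled on the m68k/h8300 helpers; the plain int "waking" field and the global lock are assumptions of this sketch, not quoted from the patch:

	#include <linux/spinlock.h>

	extern spinlock_t semaphore_wake_lock;

	static inline void wake_one_more(struct semaphore *sem)
	{
		unsigned long flags;

		spin_lock_irqsave(&semaphore_wake_lock, flags);
		sem->waking++;			/* deposit one wakeup credit */
		spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	}

	static inline int waking_non_zero(struct semaphore *sem)
	{
		unsigned long flags;
		int ret = 0;

		spin_lock_irqsave(&semaphore_wake_lock, flags);
		if (sem->waking > 0) {
			sem->waking--;		/* consume the credit... */
			ret = 1;		/* ...and gate through */
		}
		spin_unlock_irqrestore(&semaphore_wake_lock, flags);
		return ret;			/* 0: go back to sleep */
	}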
diff --git a/arch/frv/kernel/Makefile b/arch/frv/kernel/Makefile
index e8f73ed28b52..c36f70b6699a 100644
--- a/arch/frv/kernel/Makefile
+++ b/arch/frv/kernel/Makefile
@@ -9,7 +9,7 @@ extra-y:= head.o init_task.o vmlinux.lds
 
 obj-y := $(heads-y) entry.o entry-table.o break.o switch_to.o kernel_thread.o \
 	 kernel_execve.o process.o traps.o ptrace.o signal.o dma.o \
-	 sys_frv.o time.o semaphore.o setup.o frv_ksyms.o \
+	 sys_frv.o time.o setup.o frv_ksyms.o \
 	 debug-stub.o irq.o sleep.o uaccess.o
 
 obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-io.o
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index f772704b3d28..0316b3c50eff 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/hardirq.h>
 #include <asm/cacheflush.h>
diff --git a/arch/frv/kernel/semaphore.c b/arch/frv/kernel/semaphore.c
deleted file mode 100644
index 7ee3a147b471..000000000000
--- a/arch/frv/kernel/semaphore.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/* semaphore.c: FR-V semaphores
- *
- * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- * - Derived from lib/rwsem-spinlock.c
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <asm/semaphore.h>
-
-struct sem_waiter {
-	struct list_head	list;
-	struct task_struct	*task;
-};
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-void semtrace(struct semaphore *sem, const char *str)
-{
-	if (sem->debug)
-		printk("[%d] %s({%d,%d})\n",
-		       current->pid,
-		       str,
-		       sem->counter,
-		       list_empty(&sem->wait_list) ? 0 : 1);
-}
-#else
-#define semtrace(SEM,STR) do { } while(0)
-#endif
-
-/*
- * wait for a token to be granted from a semaphore
- * - entered with lock held and interrupts disabled
- */
-void __down(struct semaphore *sem, unsigned long flags)
-{
-	struct task_struct *tsk = current;
-	struct sem_waiter waiter;
-
-	semtrace(sem, "Entering __down");
-
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
-	get_task_struct(tsk);
-
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	/* wait to be given the semaphore */
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-
-	for (;;) {
-		if (list_empty(&waiter.list))
-			break;
-		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
-
-	tsk->state = TASK_RUNNING;
-	semtrace(sem, "Leaving __down");
-}
-
-EXPORT_SYMBOL(__down);
-
-/*
- * interruptibly wait for a token to be granted from a semaphore
- * - entered with lock held and interrupts disabled
- */
-int __down_interruptible(struct semaphore *sem, unsigned long flags)
-{
-	struct task_struct *tsk = current;
-	struct sem_waiter waiter;
-	int ret;
-
-	semtrace(sem,"Entering __down_interruptible");
-
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
-	get_task_struct(tsk);
-
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we don't need to touch the semaphore struct anymore */
-	set_task_state(tsk, TASK_INTERRUPTIBLE);
-
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	/* wait to be given the semaphore */
-	ret = 0;
-	for (;;) {
-		if (list_empty(&waiter.list))
-			break;
-		if (unlikely(signal_pending(current)))
-			goto interrupted;
-		schedule();
-		set_task_state(tsk, TASK_INTERRUPTIBLE);
-	}
-
- out:
-	tsk->state = TASK_RUNNING;
-	semtrace(sem, "Leaving __down_interruptible");
-	return ret;
-
- interrupted:
-	spin_lock_irqsave(&sem->wait_lock, flags);
-
-	if (!list_empty(&waiter.list)) {
-		list_del(&waiter.list);
-		ret = -EINTR;
-	}
-
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-	if (ret == -EINTR)
-		put_task_struct(current);
-	goto out;
-}
-
-EXPORT_SYMBOL(__down_interruptible);
-
-/*
- * release a single token back to a semaphore
- * - entered with lock held and interrupts disabled
- */
-void __up(struct semaphore *sem)
-{
-	struct task_struct *tsk;
-	struct sem_waiter *waiter;
-
-	semtrace(sem,"Entering __up");
-
-	/* grant the token to the process at the front of the queue */
-	waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);
-
-	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
-	 * It is allocated on the waiter's stack and may become invalid at
-	 * any time after that point (due to a wakeup from another source).
-	 */
-	list_del_init(&waiter->list);
-	tsk = waiter->task;
-	mb();
-	waiter->task = NULL;
-	wake_up_process(tsk);
-	put_task_struct(tsk);
-
-	semtrace(sem,"Leaving __up");
-}
-
-EXPORT_SYMBOL(__up);
diff --git a/arch/h8300/kernel/Makefile b/arch/h8300/kernel/Makefile
index 874f6aefee65..6c248c3c5c3b 100644
--- a/arch/h8300/kernel/Makefile
+++ b/arch/h8300/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y := vmlinux.lds
 
 obj-y := process.o traps.o ptrace.o irq.o \
-	 sys_h8300.o time.o semaphore.o signal.o \
+	 sys_h8300.o time.o signal.o \
 	 setup.o gpio.o init_task.o syscalls.o \
 	 entry.o
 
diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c
index d1b15267ac81..6866bd9c7fb4 100644
--- a/arch/h8300/kernel/h8300_ksyms.c
+++ b/arch/h8300/kernel/h8300_ksyms.c
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/current.h>
 #include <asm/gpio.h>
diff --git a/arch/h8300/kernel/semaphore.c b/arch/h8300/kernel/semaphore.c
deleted file mode 100644
index d12cbbfe6ebd..000000000000
--- a/arch/h8300/kernel/semaphore.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <asm/semaphore-helper.h>
-
-#ifndef CONFIG_RMW_INSNS
-spinlock_t semaphore_wake_lock;
-#endif
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit. ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore. The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function. Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible. This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return. If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-
-#define DOWN_HEAD(task_state) \
- \
- \
-	current->state = (task_state); \
-	add_wait_queue(&sem->wait, &wait); \
- \
-	/* \
-	 * Ok, we're set up. sem->count is known to be less than zero \
-	 * so we must wait. \
-	 * \
-	 * We can let go the lock for purposes of waiting. \
-	 * We re-acquire it after awaking so as to protect \
-	 * all semaphore operations. \
-	 * \
-	 * If "up()" is called before we call waking_non_zero() then \
-	 * we will catch it right away. If it is called later then \
-	 * we will have to go through a wakeup cycle to catch it. \
-	 * \
-	 * Multiple waiters contend for the semaphore lock to see \
-	 * who gets to gate through and who has to wait some more. \
-	 */ \
-	for (;;) {
-
-#define DOWN_TAIL(task_state) \
-		current->state = (task_state); \
-	} \
-	current->state = TASK_RUNNING; \
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	DECLARE_WAITQUEUE(wait, current);
-	int ret = 0;
-
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, current);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 33e5a598672d..13fd10e8699e 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds
 
 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
 	irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
-	salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
+	salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
 	unwind.o mca.o mca_asm.o topology.o
 
 obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 8e7193d55528..6da1f20d7372 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -19,12 +19,6 @@ EXPORT_SYMBOL_GPL(empty_zero_page);
 EXPORT_SYMBOL(ip_fast_csum);		/* hand-coded assembly */
 EXPORT_SYMBOL(csum_ipv6_magic);
 
-#include <asm/semaphore.h>
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
-
 #include <asm/page.h>
 EXPORT_SYMBOL(clear_page);
 
diff --git a/arch/ia64/kernel/semaphore.c b/arch/ia64/kernel/semaphore.c
deleted file mode 100644
index 2724ef3fbae2..000000000000
--- a/arch/ia64/kernel/semaphore.c
+++ /dev/null
@@ -1,165 +0,0 @@
1/*
2 * IA-64 semaphore implementation (derived from x86 version).
3 *
4 * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
5 * David Mosberger-Tang <davidm@hpl.hp.com>
6 */
7
8/*
9 * Semaphores are implemented using a two-way counter: The "count"
10 * variable is decremented for each process that tries to acquire the
11 * semaphore, while the "sleepers" variable is a count of such
12 * acquires.
13 *
14 * Notably, the inline "up()" and "down()" functions can efficiently
15 * test if they need to do any extra work (up needs to do something
16 * only if count was negative before the increment operation).
17 *
18 * "sleepers" and the contention routine ordering is protected
19 * by the spinlock in the semaphore's waitqueue head.
20 *
21 * Note that these functions are only called when there is contention
22 * on the lock, and as such all this is the "non-critical" part of the
23 * whole semaphore business. The critical part is the inline stuff in
24 * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
25 */
26#include <linux/sched.h>
27#include <linux/init.h>
28
29#include <asm/errno.h>
30#include <asm/semaphore.h>
31
32/*
33 * Logic:
34 * - Only on a boundary condition do we need to care. When we go
35 * from a negative count to a non-negative, we wake people up.
36 * - When we go from a non-negative count to a negative, we must
37 * (a) synchronize with the "sleepers" count and (b) make sure
38 * that we're on the wakeup list before we synchronize so that
39 * we cannot lose wakeup events.
40 */
41
42void
43__up (struct semaphore *sem)
44{
45 wake_up(&sem->wait);
46}
47
48void __sched __down (struct semaphore *sem)
49{
50 struct task_struct *tsk = current;
51 DECLARE_WAITQUEUE(wait, tsk);
52 unsigned long flags;
53
54 tsk->state = TASK_UNINTERRUPTIBLE;
55 spin_lock_irqsave(&sem->wait.lock, flags);
56 add_wait_queue_exclusive_locked(&sem->wait, &wait);
57
58 sem->sleepers++;
59 for (;;) {
60 int sleepers = sem->sleepers;
61
62 /*
63 * Add "everybody else" into it. They aren't
64 * playing, because we own the spinlock in
65 * the wait_queue_head.
66 */
67 if (!atomic_add_negative(sleepers - 1, &sem->count)) {
68 sem->sleepers = 0;
69 break;
70 }
71 sem->sleepers = 1; /* us - see -1 above */
72 spin_unlock_irqrestore(&sem->wait.lock, flags);
73
74 schedule();
75
76 spin_lock_irqsave(&sem->wait.lock, flags);
77 tsk->state = TASK_UNINTERRUPTIBLE;
78 }
79 remove_wait_queue_locked(&sem->wait, &wait);
80 wake_up_locked(&sem->wait);
81 spin_unlock_irqrestore(&sem->wait.lock, flags);
82 tsk->state = TASK_RUNNING;
83}
84
85int __sched __down_interruptible (struct semaphore * sem)
86{
87 int retval = 0;
88 struct task_struct *tsk = current;
89 DECLARE_WAITQUEUE(wait, tsk);
90 unsigned long flags;
91
92 tsk->state = TASK_INTERRUPTIBLE;
93 spin_lock_irqsave(&sem->wait.lock, flags);
94 add_wait_queue_exclusive_locked(&sem->wait, &wait);
95
96 sem->sleepers ++;
97 for (;;) {
98 int sleepers = sem->sleepers;
99
100 /*
101 * With signals pending, this turns into
102 * the trylock failure case - we won't be
103 * sleeping, and we can't get the lock as
104 * it has contention. Just correct the count
105 * and exit.
106 */
107 if (signal_pending(current)) {
108 retval = -EINTR;
109 sem->sleepers = 0;
110 atomic_add(sleepers, &sem->count);
111 break;
112 }
113
114 /*
115 * Add "everybody else" into it. They aren't
116 * playing, because we own the spinlock in
117 * wait_queue_head. The "-1" is because we're
118 * still hoping to get the semaphore.
119 */
120 if (!atomic_add_negative(sleepers - 1, &sem->count)) {
121 sem->sleepers = 0;
122 break;
123 }
124 sem->sleepers = 1; /* us - see -1 above */
125 spin_unlock_irqrestore(&sem->wait.lock, flags);
126
127 schedule();
128
129 spin_lock_irqsave(&sem->wait.lock, flags);
130 tsk->state = TASK_INTERRUPTIBLE;
131 }
132 remove_wait_queue_locked(&sem->wait, &wait);
133 wake_up_locked(&sem->wait);
134 spin_unlock_irqrestore(&sem->wait.lock, flags);
135
136 tsk->state = TASK_RUNNING;
137 return retval;
138}
139
140/*
141 * Trylock failed - make sure we correct for having decremented the
142 * count.
143 */
144int
145__down_trylock (struct semaphore *sem)
146{
147 unsigned long flags;
148 int sleepers;
149
150 spin_lock_irqsave(&sem->wait.lock, flags);
151 sleepers = sem->sleepers + 1;
152 sem->sleepers = 0;
153
154 /*
155 * Add "everybody else" and us into it. They aren't
156 * playing, because we own the spinlock in the
157 * wait_queue_head.
158 */
159 if (!atomic_add_negative(sleepers, &sem->count)) {
160 wake_up_locked(&sem->wait);
161 }
162
163 spin_unlock_irqrestore(&sem->wait.lock, flags);
164 return 1;
165}
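
Concrete numbers make the sleepers bookkeeping above easier to check. Below is a small user-space model (hypothetical, single-threaded, no locking) that replays the count/sleepers transitions for one holder and one contended waiter:

#include <assert.h>

int main(void)
{
	int count = 1, sleepers = 0;	/* semaphore starts free */

	count--;		/* holder's down(): 1 -> 0, fast path */
	count--;		/* waiter's down(): 0 -> -1, enters __down() */
	sleepers++;

	/* first loop pass: atomic_add_negative(sleepers - 1, &count) */
	count += sleepers - 1;	/* adds 0; count stays -1: still contended */
	assert(count < 0);
	sleepers = 1;		/* "us - see -1 above", then sleep */

	count++;		/* holder's up(): -1 -> 0, __up() wakes us */

	/* second pass: adding sleepers - 1 == 0 now finds count >= 0 */
	count += sleepers - 1;
	assert(count == 0);	/* non-negative: we own the semaphore */
	sleepers = 0;
	return 0;
}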
diff --git a/arch/m32r/kernel/Makefile b/arch/m32r/kernel/Makefile
index e97e26e87c9e..09200d4886e3 100644
--- a/arch/m32r/kernel/Makefile
+++ b/arch/m32r/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y	:= head.o init_task.o vmlinux.lds
 
 obj-y	:= process.o entry.o traps.o align.o irq.o setup.o time.o \
-	m32r_ksyms.o sys_m32r.o semaphore.o signal.o ptrace.o
+	m32r_ksyms.o sys_m32r.o signal.o ptrace.o
 
 obj-$(CONFIG_SMP)	+= smp.o smpboot.o
 obj-$(CONFIG_MODULES)	+= module.o
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index 41a4c95e06d6..e6709fe950ba 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -7,7 +7,6 @@
 #include <linux/interrupt.h>
 #include <linux/string.h>
 
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -22,10 +21,6 @@ EXPORT_SYMBOL(dump_fpu);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down_trylock);
 
 /* Networking helper routines. */
 /* Delay loops */
diff --git a/arch/m32r/kernel/semaphore.c b/arch/m32r/kernel/semaphore.c
deleted file mode 100644
index 940c2d37cfd1..000000000000
--- a/arch/m32r/kernel/semaphore.c
+++ /dev/null
@@ -1,185 +0,0 @@
1/*
2 * linux/arch/m32r/semaphore.c
3 * orig : i386 2.6.4
4 *
5 * M32R semaphore implementation.
6 *
7 * Copyright (c) 2002 - 2004 Hitoshi Yamamoto
8 */
9
10/*
11 * i386 semaphore implementation.
12 *
13 * (C) Copyright 1999 Linus Torvalds
14 *
15 * Portions Copyright 1999 Red Hat, Inc.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 *
22 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
23 */
24#include <linux/sched.h>
25#include <linux/err.h>
26#include <linux/init.h>
27#include <asm/semaphore.h>
28
29/*
30 * Semaphores are implemented using a two-way counter:
31 * The "count" variable is decremented for each process
32 * that tries to acquire the semaphore, while the "sleepers"
33 * variable is a count of such acquires.
34 *
35 * Notably, the inline "up()" and "down()" functions can
36 * efficiently test if they need to do any extra work (up
37 * needs to do something only if count was negative before
38 * the increment operation).
39 *
40 * "sleepers" and the contention routine ordering is protected
41 * by the spinlock in the semaphore's waitqueue head.
42 *
43 * Note that these functions are only called when there is
44 * contention on the lock, and as such all this is the
45 * "non-critical" part of the whole semaphore business. The
46 * critical part is the inline stuff in <asm/semaphore.h>
47 * where we want to avoid any extra jumps and calls.
48 */
49
50/*
51 * Logic:
52 * - only on a boundary condition do we need to care. When we go
53 * from a negative count to a non-negative, we wake people up.
54 * - when we go from a non-negative count to a negative, we must
55 * (a) synchronize with the "sleepers" count and (b) make sure
56 * that we're on the wakeup list before we synchronize so that
57 * we cannot lose wakeup events.
58 */
59
60asmlinkage void __up(struct semaphore *sem)
61{
62 wake_up(&sem->wait);
63}
64
65asmlinkage void __sched __down(struct semaphore * sem)
66{
67 struct task_struct *tsk = current;
68 DECLARE_WAITQUEUE(wait, tsk);
69 unsigned long flags;
70
71 tsk->state = TASK_UNINTERRUPTIBLE;
72 spin_lock_irqsave(&sem->wait.lock, flags);
73 add_wait_queue_exclusive_locked(&sem->wait, &wait);
74
75 sem->sleepers++;
76 for (;;) {
77 int sleepers = sem->sleepers;
78
79 /*
80 * Add "everybody else" into it. They aren't
81 * playing, because we own the spinlock in
82 * the wait_queue_head.
83 */
84 if (!atomic_add_negative(sleepers - 1, &sem->count)) {
85 sem->sleepers = 0;
86 break;
87 }
88 sem->sleepers = 1; /* us - see -1 above */
89 spin_unlock_irqrestore(&sem->wait.lock, flags);
90
91 schedule();
92
93 spin_lock_irqsave(&sem->wait.lock, flags);
94 tsk->state = TASK_UNINTERRUPTIBLE;
95 }
96 remove_wait_queue_locked(&sem->wait, &wait);
97 wake_up_locked(&sem->wait);
98 spin_unlock_irqrestore(&sem->wait.lock, flags);
99 tsk->state = TASK_RUNNING;
100}
101
102asmlinkage int __sched __down_interruptible(struct semaphore * sem)
103{
104 int retval = 0;
105 struct task_struct *tsk = current;
106 DECLARE_WAITQUEUE(wait, tsk);
107 unsigned long flags;
108
109 tsk->state = TASK_INTERRUPTIBLE;
110 spin_lock_irqsave(&sem->wait.lock, flags);
111 add_wait_queue_exclusive_locked(&sem->wait, &wait);
112
113 sem->sleepers++;
114 for (;;) {
115 int sleepers = sem->sleepers;
116
117 /*
118 * With signals pending, this turns into
119 * the trylock failure case - we won't be
120 * sleeping, and we can't get the lock as
121 * it has contention. Just correct the count
122 * and exit.
123 */
124 if (signal_pending(current)) {
125 retval = -EINTR;
126 sem->sleepers = 0;
127 atomic_add(sleepers, &sem->count);
128 break;
129 }
130
131 /*
132 * Add "everybody else" into it. They aren't
133 * playing, because we own the spinlock in
134 * wait_queue_head. The "-1" is because we're
135 * still hoping to get the semaphore.
136 */
137 if (!atomic_add_negative(sleepers - 1, &sem->count)) {
138 sem->sleepers = 0;
139 break;
140 }
141 sem->sleepers = 1; /* us - see -1 above */
142 spin_unlock_irqrestore(&sem->wait.lock, flags);
143
144 schedule();
145
146 spin_lock_irqsave(&sem->wait.lock, flags);
147 tsk->state = TASK_INTERRUPTIBLE;
148 }
149 remove_wait_queue_locked(&sem->wait, &wait);
150 wake_up_locked(&sem->wait);
151 spin_unlock_irqrestore(&sem->wait.lock, flags);
152
153 tsk->state = TASK_RUNNING;
154 return retval;
155}
156
157/*
158 * Trylock failed - make sure we correct for
159 * having decremented the count.
160 *
161 * We could have done the trylock with a
162 * single "cmpxchg" without failure cases,
163 * but then it wouldn't work on a 386.
164 */
165asmlinkage int __down_trylock(struct semaphore * sem)
166{
167 int sleepers;
168 unsigned long flags;
169
170 spin_lock_irqsave(&sem->wait.lock, flags);
171 sleepers = sem->sleepers + 1;
172 sem->sleepers = 0;
173
174 /*
175 * Add "everybody else" and us into it. They aren't
176 * playing, because we own the spinlock in the
177 * wait_queue_head.
178 */
179 if (!atomic_add_negative(sleepers, &sem->count)) {
180 wake_up_locked(&sem->wait);
181 }
182
183 spin_unlock_irqrestore(&sem->wait.lock, flags);
184 return 1;
185}
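
__down_trylock() applies the same correction in one step: the failed fast path has already decremented count, so adding back sleepers + 1 both undoes that decrement and folds in any old sleeper count. A user-space replay (hypothetical, no locking) of the simplest case:

#include <assert.h>

int main(void)
{
	int count = 0, sleepers = 0;	/* semaphore held, no waiters */

	count--;			/* down_trylock() fast path: 0 -> -1 */

	/* __down_trylock(): sleepers = old sleepers + 1, then add it back */
	int add = sleepers + 1;
	sleepers = 0;
	count += add;			/* -1 + 1 == 0: count as if we never tried */
	assert(count == 0);
	return 0;			/* the kernel version returns 1: not acquired */
}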
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index a806208c7fb5..7a62a718143b 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -10,7 +10,7 @@ endif
 extra-y += vmlinux.lds
 
 obj-y	:= entry.o process.o traps.o ints.o signal.o ptrace.o module.o \
-	   sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o devres.o
+	   sys_m68k.o time.o setup.o m68k_ksyms.o devres.o
 
 devres-y = ../../../kernel/irq/devres.o
 
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
index 6fc69c74fe2e..d900e77e5363 100644
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -1,5 +1,4 @@
 #include <linux/module.h>
-#include <asm/semaphore.h>
 
 asmlinkage long long __ashldi3 (long long, int);
 asmlinkage long long __ashrdi3 (long long, int);
@@ -15,8 +14,3 @@ EXPORT_SYMBOL(__ashrdi3);
 EXPORT_SYMBOL(__lshrdi3);
 EXPORT_SYMBOL(__muldi3);
 
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
-
diff --git a/arch/m68k/kernel/semaphore.c b/arch/m68k/kernel/semaphore.c
deleted file mode 100644
index d12cbbfe6ebd..000000000000
--- a/arch/m68k/kernel/semaphore.c
+++ /dev/null
@@ -1,132 +0,0 @@
1/*
2 * Generic semaphore code. Buyer beware. Do your own
3 * specific changes in <asm/semaphore-helper.h>
4 */
5
6#include <linux/sched.h>
7#include <linux/init.h>
8#include <asm/semaphore-helper.h>
9
10#ifndef CONFIG_RMW_INSNS
11spinlock_t semaphore_wake_lock;
12#endif
13
14/*
15 * Semaphores are implemented using a two-way counter:
16 * The "count" variable is decremented for each process
17 * that tries to sleep, while the "waking" variable is
18 * incremented when the "up()" code goes to wake up waiting
19 * processes.
20 *
21 * Notably, the inline "up()" and "down()" functions can
22 * efficiently test if they need to do any extra work (up
23 * needs to do something only if count was negative before
24 * the increment operation).
25 *
26 * waking_non_zero() (from asm/semaphore.h) must execute
27 * atomically.
28 *
29 * When __up() is called, the count was negative before
30 * incrementing it, and we need to wake up somebody.
31 *
32 * This routine adds one to the count of processes that need to
33 * wake up and exit. ALL waiting processes actually wake up but
34 * only the one that gets to the "waking" field first will gate
35 * through and acquire the semaphore. The others will go back
36 * to sleep.
37 *
38 * Note that these functions are only called when there is
39 * contention on the lock, and as such all this is the
40 * "non-critical" part of the whole semaphore business. The
41 * critical part is the inline stuff in <asm/semaphore.h>
42 * where we want to avoid any extra jumps and calls.
43 */
44void __up(struct semaphore *sem)
45{
46 wake_one_more(sem);
47 wake_up(&sem->wait);
48}
49
50/*
51 * Perform the "down" function. Return zero if the semaphore was
52 * acquired, or negative if we were signalled out of the function.
53 *
54 * If called from __down, the return is ignored and the wait loop is
55 * not interruptible. This means that a task waiting on a semaphore
56 * using "down()" cannot be killed until someone does an "up()" on
57 * the semaphore.
58 *
59 * If called from __down_interruptible, the return value gets checked
60 * upon return. If the return value is negative then the task continues
61 * with the negative value in the return register (it can be tested by
62 * the caller).
63 *
64 * Either form may be used in conjunction with "up()".
65 *
66 */
67
68
69#define DOWN_HEAD(task_state) \
70 \
71 \
72 current->state = (task_state); \
73 add_wait_queue(&sem->wait, &wait); \
74 \
75 /* \
76 * Ok, we're set up. sem->count is known to be less than zero \
77 * so we must wait. \
78 * \
79 * We can let go of the lock for purposes of waiting. \
80 * We re-acquire it after waking so as to protect \
81 * all semaphore operations. \
82 * \
83 * If "up()" is called before we call waking_non_zero() then \
84 * we will catch it right away. If it is called later then \
85 * we will have to go through a wakeup cycle to catch it. \
86 * \
87 * Multiple waiters contend for the semaphore lock to see \
88 * who gets to gate through and who has to wait some more. \
89 */ \
90 for (;;) {
91
92#define DOWN_TAIL(task_state) \
93 current->state = (task_state); \
94 } \
95 current->state = TASK_RUNNING; \
96 remove_wait_queue(&sem->wait, &wait);
97
98void __sched __down(struct semaphore * sem)
99{
100 DECLARE_WAITQUEUE(wait, current);
101
102 DOWN_HEAD(TASK_UNINTERRUPTIBLE)
103 if (waking_non_zero(sem))
104 break;
105 schedule();
106 DOWN_TAIL(TASK_UNINTERRUPTIBLE)
107}
108
109int __sched __down_interruptible(struct semaphore * sem)
110{
111 DECLARE_WAITQUEUE(wait, current);
112 int ret = 0;
113
114 DOWN_HEAD(TASK_INTERRUPTIBLE)
115
116 ret = waking_non_zero_interruptible(sem, current);
117 if (ret)
118 {
119 if (ret == 1)
120 /* ret != 0 only if we get interrupted -arca */
121 ret = 0;
122 break;
123 }
124 schedule();
125 DOWN_TAIL(TASK_INTERRUPTIBLE)
126 return ret;
127}
128
129int __down_trylock(struct semaphore * sem)
130{
131 return waking_non_zero_trylock(sem);
132}
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile
index 6bbf19f96007..a18af095cd7c 100644
--- a/arch/m68k/lib/Makefile
+++ b/arch/m68k/lib/Makefile
@@ -5,4 +5,4 @@
 EXTRA_AFLAGS := -traditional
 
 lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
-	 checksum.o string.o semaphore.o uaccess.o
+	 checksum.o string.o uaccess.o
diff --git a/arch/m68k/lib/semaphore.S b/arch/m68k/lib/semaphore.S
deleted file mode 100644
index 0215624c1602..000000000000
--- a/arch/m68k/lib/semaphore.S
+++ /dev/null
@@ -1,53 +0,0 @@
1/*
2 * linux/arch/m68k/lib/semaphore.S
3 *
4 * Copyright (C) 1996 Linus Torvalds
5 *
6 * m68k version by Andreas Schwab
7 */
8
9#include <linux/linkage.h>
10#include <asm/semaphore.h>
11
12/*
13 * The semaphore operations have a special calling sequence that
14 * allows us to do a simpler in-line version of them. These routines
15 * need to convert that sequence back into the C sequence when
16 * there is contention on the semaphore.
17 */
18ENTRY(__down_failed)
19 moveml %a0/%d0/%d1,-(%sp)
20 movel %a1,-(%sp)
21 jbsr __down
22 movel (%sp)+,%a1
23 moveml (%sp)+,%a0/%d0/%d1
24 rts
25
26ENTRY(__down_failed_interruptible)
27 movel %a0,-(%sp)
28 movel %d1,-(%sp)
29 movel %a1,-(%sp)
30 jbsr __down_interruptible
31 movel (%sp)+,%a1
32 movel (%sp)+,%d1
33 movel (%sp)+,%a0
34 rts
35
36ENTRY(__down_failed_trylock)
37 movel %a0,-(%sp)
38 movel %d1,-(%sp)
39 movel %a1,-(%sp)
40 jbsr __down_trylock
41 movel (%sp)+,%a1
42 movel (%sp)+,%d1
43 movel (%sp)+,%a0
44 rts
45
46ENTRY(__up_wakeup)
47 moveml %a0/%d0/%d1,-(%sp)
48 movel %a1,-(%sp)
49 jbsr __up
50 movel (%sp)+,%a1
51 moveml (%sp)+,%a0/%d0/%d1
52 rts
53
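
In C terms, the contract these stubs implement looks roughly like the sketch below: the inline fast path in <asm/semaphore.h> does one atomic decrement and, only on contention, branches to __down_failed, which preserves the scratch registers the compiler would otherwise clobber and forwards to the C __down(). This is a conceptual sketch, not the real inline asm:

/* conceptual C rendering of the inline fast path (the real one is asm
 * in <asm/semaphore.h>, with sem passed in %a1 rather than as a C argument) */
static inline void down(struct semaphore *sem)
{
	if (atomic_dec_return(&sem->count) < 0)
		__down_failed(sem);	/* stub above: saves scratch regs, calls __down() */
}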
diff --git a/arch/m68knommu/kernel/Makefile b/arch/m68knommu/kernel/Makefile
index 1524b39ad63f..f0eab3dedb5a 100644
--- a/arch/m68knommu/kernel/Makefile
+++ b/arch/m68knommu/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y := vmlinux.lds
 
 obj-y += dma.o entry.o init_task.o irq.o m68k_ksyms.o process.o ptrace.o \
-	 semaphore.o setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o
+	 setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o
 
 obj-$(CONFIG_MODULES)	+= module.o
 obj-$(CONFIG_COMEMPCI)	+= comempci.o
diff --git a/arch/m68knommu/kernel/m68k_ksyms.c b/arch/m68knommu/kernel/m68k_ksyms.c
index 53fad1490282..39fe0a7aec32 100644
--- a/arch/m68knommu/kernel/m68k_ksyms.c
+++ b/arch/m68knommu/kernel/m68k_ksyms.c
@@ -13,7 +13,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/current.h>
 
@@ -39,11 +38,6 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
 
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
-
 /*
  * libgcc functions - functions that are used internally by the
  * compiler... (prototypes are not correct though, but that
diff --git a/arch/m68knommu/kernel/semaphore.c b/arch/m68knommu/kernel/semaphore.c
deleted file mode 100644
index bce2bc7d87c6..000000000000
--- a/arch/m68knommu/kernel/semaphore.c
+++ /dev/null
@@ -1,133 +0,0 @@
1/*
2 * Generic semaphore code. Buyer beware. Do your own
3 * specific changes in <asm/semaphore-helper.h>
4 */
5
6#include <linux/sched.h>
7#include <linux/err.h>
8#include <linux/init.h>
9#include <asm/semaphore-helper.h>
10
11#ifndef CONFIG_RMW_INSNS
12spinlock_t semaphore_wake_lock;
13#endif
14
15/*
16 * Semaphores are implemented using a two-way counter:
17 * The "count" variable is decremented for each process
18 * that tries to sleep, while the "waking" variable is
19 * incremented when the "up()" code goes to wake up waiting
20 * processes.
21 *
22 * Notably, the inline "up()" and "down()" functions can
23 * efficiently test if they need to do any extra work (up
24 * needs to do something only if count was negative before
25 * the increment operation).
26 *
27 * waking_non_zero() (from asm/semaphore.h) must execute
28 * atomically.
29 *
30 * When __up() is called, the count was negative before
31 * incrementing it, and we need to wake up somebody.
32 *
33 * This routine adds one to the count of processes that need to
34 * wake up and exit. ALL waiting processes actually wake up but
35 * only the one that gets to the "waking" field first will gate
36 * through and acquire the semaphore. The others will go back
37 * to sleep.
38 *
39 * Note that these functions are only called when there is
40 * contention on the lock, and as such all this is the
41 * "non-critical" part of the whole semaphore business. The
42 * critical part is the inline stuff in <asm/semaphore.h>
43 * where we want to avoid any extra jumps and calls.
44 */
45void __up(struct semaphore *sem)
46{
47 wake_one_more(sem);
48 wake_up(&sem->wait);
49}
50
51/*
52 * Perform the "down" function. Return zero if the semaphore was
53 * acquired, or negative if we were signalled out of the function.
54 *
55 * If called from __down, the return is ignored and the wait loop is
56 * not interruptible. This means that a task waiting on a semaphore
57 * using "down()" cannot be killed until someone does an "up()" on
58 * the semaphore.
59 *
60 * If called from __down_interruptible, the return value gets checked
61 * upon return. If the return value is negative then the task continues
62 * with the negative value in the return register (it can be tested by
63 * the caller).
64 *
65 * Either form may be used in conjunction with "up()".
66 *
67 */
68
69
70#define DOWN_HEAD(task_state) \
71 \
72 \
73 current->state = (task_state); \
74 add_wait_queue(&sem->wait, &wait); \
75 \
76 /* \
77 * Ok, we're set up. sem->count is known to be less than zero \
78 * so we must wait. \
79 * \
80 * We can let go of the lock for purposes of waiting. \
81 * We re-acquire it after waking so as to protect \
82 * all semaphore operations. \
83 * \
84 * If "up()" is called before we call waking_non_zero() then \
85 * we will catch it right away. If it is called later then \
86 * we will have to go through a wakeup cycle to catch it. \
87 * \
88 * Multiple waiters contend for the semaphore lock to see \
89 * who gets to gate through and who has to wait some more. \
90 */ \
91 for (;;) {
92
93#define DOWN_TAIL(task_state) \
94 current->state = (task_state); \
95 } \
96 current->state = TASK_RUNNING; \
97 remove_wait_queue(&sem->wait, &wait);
98
99void __sched __down(struct semaphore * sem)
100{
101 DECLARE_WAITQUEUE(wait, current);
102
103 DOWN_HEAD(TASK_UNINTERRUPTIBLE)
104 if (waking_non_zero(sem))
105 break;
106 schedule();
107 DOWN_TAIL(TASK_UNINTERRUPTIBLE)
108}
109
110int __sched __down_interruptible(struct semaphore * sem)
111{
112 DECLARE_WAITQUEUE(wait, current);
113 int ret = 0;
114
115 DOWN_HEAD(TASK_INTERRUPTIBLE)
116
117 ret = waking_non_zero_interruptible(sem, current);
118 if (ret)
119 {
120 if (ret == 1)
121 /* ret != 0 only if we get interrupted -arca */
122 ret = 0;
123 break;
124 }
125 schedule();
126 DOWN_TAIL(TASK_INTERRUPTIBLE)
127 return ret;
128}
129
130int __down_trylock(struct semaphore * sem)
131{
132 return waking_non_zero_trylock(sem);
133}
diff --git a/arch/m68knommu/lib/Makefile b/arch/m68knommu/lib/Makefile
index e051a7913987..d94d709665aa 100644
--- a/arch/m68knommu/lib/Makefile
+++ b/arch/m68knommu/lib/Makefile
@@ -4,4 +4,4 @@
 
 lib-y	:= ashldi3.o ashrdi3.o lshrdi3.o \
 	muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \
-	checksum.o semaphore.o memcpy.o memset.o delay.o
+	checksum.o memcpy.o memset.o delay.o
diff --git a/arch/m68knommu/lib/semaphore.S b/arch/m68knommu/lib/semaphore.S
deleted file mode 100644
index 87c746034376..000000000000
--- a/arch/m68knommu/lib/semaphore.S
+++ /dev/null
@@ -1,66 +0,0 @@
1/*
2 * linux/arch/m68k/lib/semaphore.S
3 *
4 * Copyright (C) 1996 Linus Torvalds
5 *
6 * m68k version by Andreas Schwab
7 *
8 * MAR/1999 -- modified to support ColdFire (gerg@snapgear.com)
9 */
10
11#include <linux/linkage.h>
12#include <asm/semaphore.h>
13
14/*
15 * "down_failed" is called with the eventual return address
16 * in %a0, and the address of the semaphore in %a1. We need
17 * to increment the number of waiters on the semaphore,
18 * call "__down()", and then eventually return to try again.
19 */
20ENTRY(__down_failed)
21#ifdef CONFIG_COLDFIRE
22 subl #12,%sp
23 moveml %a0/%d0/%d1,(%sp)
24#else
25 moveml %a0/%d0/%d1,-(%sp)
26#endif
27 movel %a1,-(%sp)
28 jbsr __down
29 movel (%sp)+,%a1
30 movel (%sp)+,%d0
31 movel (%sp)+,%d1
32 rts
33
34ENTRY(__down_failed_interruptible)
35 movel %a0,-(%sp)
36 movel %d1,-(%sp)
37 movel %a1,-(%sp)
38 jbsr __down_interruptible
39 movel (%sp)+,%a1
40 movel (%sp)+,%d1
41 rts
42
43ENTRY(__up_wakeup)
44#ifdef CONFIG_COLDFIRE
45 subl #12,%sp
46 moveml %a0/%d0/%d1,(%sp)
47#else
48 moveml %a0/%d0/%d1,-(%sp)
49#endif
50 movel %a1,-(%sp)
51 jbsr __up
52 movel (%sp)+,%a1
53 movel (%sp)+,%d0
54 movel (%sp)+,%d1
55 rts
56
57ENTRY(__down_failed_trylock)
58 movel %a0,-(%sp)
59 movel %d1,-(%sp)
60 movel %a1,-(%sp)
61 jbsr __down_trylock
62 movel (%sp)+,%a1
63 movel (%sp)+,%d1
64 movel (%sp)+,%a0
65 rts
66
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 9e78e1a4ca17..6fcdb6fda2e2 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y		:= head.o init_task.o vmlinux.lds
 
 obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
-		   ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \
+		   ptrace.o reset.o setup.o signal.o syscall.o \
 		   time.o topology.o traps.o unaligned.o
 
 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
diff --git a/arch/mips/kernel/semaphore.c b/arch/mips/kernel/semaphore.c
deleted file mode 100644
index 1265358cdca1..000000000000
--- a/arch/mips/kernel/semaphore.c
+++ /dev/null
@@ -1,168 +0,0 @@
1/*
2 * MIPS-specific semaphore code.
3 *
4 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
5 * Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
13 * to eliminate the SMP races in the old version between the updates
14 * of `count' and `waking'. Now we use negative `count' values to
15 * indicate that some process(es) are waiting for the semaphore.
16 */
17
18#include <linux/module.h>
19#include <linux/sched.h>
20#include <linux/init.h>
21#include <asm/atomic.h>
22#include <asm/cpu-features.h>
23#include <asm/errno.h>
24#include <asm/semaphore.h>
25#include <asm/war.h>
26/*
27 * Atomically update sem->count.
28 * This does the equivalent of the following:
29 *
30 * old_count = sem->count;
31 * tmp = MAX(old_count, 0) + incr;
32 * sem->count = tmp;
33 * return old_count;
34 *
35 * On machines without lld/scd we need a spinlock to make the manipulation of
36 * sem->count and sem->waking atomic. Scalability isn't an issue because
37 * this lock is only taken on UP, where it's an empty variable anyway.
38 */
39static inline int __sem_update_count(struct semaphore *sem, int incr)
40{
41 int old_count, tmp;
42
43 if (cpu_has_llsc && R10000_LLSC_WAR) {
44 __asm__ __volatile__(
45 " .set mips3 \n"
46 "1: ll %0, %2 # __sem_update_count \n"
47 " sra %1, %0, 31 \n"
48 " not %1 \n"
49 " and %1, %0, %1 \n"
50 " addu %1, %1, %3 \n"
51 " sc %1, %2 \n"
52 " beqzl %1, 1b \n"
53 " .set mips0 \n"
54 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
55 : "r" (incr), "m" (sem->count));
56 } else if (cpu_has_llsc) {
57 __asm__ __volatile__(
58 " .set mips3 \n"
59 "1: ll %0, %2 # __sem_update_count \n"
60 " sra %1, %0, 31 \n"
61 " not %1 \n"
62 " and %1, %0, %1 \n"
63 " addu %1, %1, %3 \n"
64 " sc %1, %2 \n"
65 " beqz %1, 1b \n"
66 " .set mips0 \n"
67 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
68 : "r" (incr), "m" (sem->count));
69 } else {
70 static DEFINE_SPINLOCK(semaphore_lock);
71 unsigned long flags;
72
73 spin_lock_irqsave(&semaphore_lock, flags);
74 old_count = atomic_read(&sem->count);
75 tmp = max_t(int, old_count, 0) + incr;
76 atomic_set(&sem->count, tmp);
77 spin_unlock_irqrestore(&semaphore_lock, flags);
78 }
79
80 return old_count;
81}
82
83void __up(struct semaphore *sem)
84{
85 /*
86 * Note that we incremented count in up() before we came here,
87 * but that was ineffective since the result was <= 0, and
88 * any negative value of count is equivalent to 0.
89 * This ends up setting count to 1, unless count is now > 0
90 * (i.e. because some other cpu has called up() in the meantime),
91 * in which case we just increment count.
92 */
93 __sem_update_count(sem, 1);
94 wake_up(&sem->wait);
95}
96
97EXPORT_SYMBOL(__up);
98
99/*
100 * Note that when we come in to __down or __down_interruptible,
101 * we have already decremented count, but that decrement was
102 * ineffective since the result was < 0, and any negative value
103 * of count is equivalent to 0.
104 * Thus it is only when we decrement count from some value > 0
105 * that we have actually got the semaphore.
106 */
107void __sched __down(struct semaphore *sem)
108{
109 struct task_struct *tsk = current;
110 DECLARE_WAITQUEUE(wait, tsk);
111
112 __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
113 add_wait_queue_exclusive(&sem->wait, &wait);
114
115 /*
116 * Try to get the semaphore. If the count is > 0, then we've
117 * got the semaphore; we decrement count and exit the loop.
118 * If the count is 0 or negative, we set it to -1, indicating
119 * that we are asleep, and then sleep.
120 */
121 while (__sem_update_count(sem, -1) <= 0) {
122 schedule();
123 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
124 }
125 remove_wait_queue(&sem->wait, &wait);
126 __set_task_state(tsk, TASK_RUNNING);
127
128 /*
129 * If there are any more sleepers, wake one of them up so
130 * that it can either get the semaphore, or set count to -1
131 * indicating that there are still processes sleeping.
132 */
133 wake_up(&sem->wait);
134}
135
136EXPORT_SYMBOL(__down);
137
138int __sched __down_interruptible(struct semaphore * sem)
139{
140 int retval = 0;
141 struct task_struct *tsk = current;
142 DECLARE_WAITQUEUE(wait, tsk);
143
144 __set_task_state(tsk, TASK_INTERRUPTIBLE);
145 add_wait_queue_exclusive(&sem->wait, &wait);
146
147 while (__sem_update_count(sem, -1) <= 0) {
148 if (signal_pending(current)) {
149 /*
150 * A signal is pending - give up trying.
151 * Set sem->count to 0 if it is negative,
152 * since we are no longer sleeping.
153 */
154 __sem_update_count(sem, 0);
155 retval = -EINTR;
156 break;
157 }
158 schedule();
159 set_task_state(tsk, TASK_INTERRUPTIBLE);
160 }
161 remove_wait_queue(&sem->wait, &wait);
162 __set_task_state(tsk, TASK_RUNNING);
163
164 wake_up(&sem->wait);
165 return retval;
166}
167
168EXPORT_SYMBOL(__down_interruptible);
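
The clamp at zero in __sem_update_count() gives count a clean reading: positive means the semaphore is free, zero or negative means it is held (with possible sleepers), and repeated failed down()s cannot push it far negative. A user-space replay of one contended hand-off, using a plain int in place of the atomic:

#include <assert.h>

/* model of __sem_update_count(): count = max(count, 0) + incr, return old */
static int sem_update(int *count, int incr)
{
	int old = *count;

	*count = (old > 0 ? old : 0) + incr;
	return old;
}

int main(void)
{
	int count = -1;		/* down() fast path already went 0 -> -1 */

	/* waiter in __down(): old value <= 0, so it must sleep */
	assert(sem_update(&count, -1) <= 0);
	assert(count == -1);	/* the clamp pins it at -1 */

	/* holder's up(): max(-1, 0) + 1 == 1, then wake_up() */
	sem_update(&count, 1);
	assert(count == 1);

	/* woken waiter retries: old == 1 > 0, it owns the semaphore */
	assert(sem_update(&count, -1) > 0);
	assert(count == 0);	/* held again, no recorded sleepers */
	return 0;
}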
diff --git a/arch/mn10300/kernel/Makefile b/arch/mn10300/kernel/Makefile
index ef07c956170a..23f2ab67574c 100644
--- a/arch/mn10300/kernel/Makefile
+++ b/arch/mn10300/kernel/Makefile
@@ -3,7 +3,7 @@
 #
 extra-y := head.o init_task.o vmlinux.lds
 
-obj-y   := process.o semaphore.o signal.o entry.o fpu.o traps.o irq.o \
+obj-y   := process.o signal.o entry.o fpu.o traps.o irq.o \
 	   ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \
 	   switch_to.o mn10300_ksyms.o kernel_execve.o
 
diff --git a/arch/mn10300/kernel/semaphore.c b/arch/mn10300/kernel/semaphore.c
deleted file mode 100644
index 9153c4039fd2..000000000000
--- a/arch/mn10300/kernel/semaphore.c
+++ /dev/null
@@ -1,149 +0,0 @@
1/* MN10300 Semaphore implementation
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#include <linux/sched.h>
12#include <linux/module.h>
13#include <asm/semaphore.h>
14
15struct sem_waiter {
16 struct list_head list;
17 struct task_struct *task;
18};
19
20#if SEMAPHORE_DEBUG
21void semtrace(struct semaphore *sem, const char *str)
22{
23 if (sem->debug)
24 printk(KERN_DEBUG "[%d] %s({%d,%d})\n",
25 current->pid,
26 str,
27 atomic_read(&sem->count),
28 list_empty(&sem->wait_list) ? 0 : 1);
29}
30#else
31#define semtrace(SEM, STR) do { } while (0)
32#endif
33
34/*
35 * wait for a token to be granted from a semaphore
36 * - entered with lock held and interrupts disabled
37 */
38void __down(struct semaphore *sem, unsigned long flags)
39{
40 struct task_struct *tsk = current;
41 struct sem_waiter waiter;
42
43 semtrace(sem, "Entering __down");
44
45 /* set up my own style of waitqueue */
46 waiter.task = tsk;
47 get_task_struct(tsk);
48
49 list_add_tail(&waiter.list, &sem->wait_list);
50
51 /* we don't need to touch the semaphore struct anymore */
52 spin_unlock_irqrestore(&sem->wait_lock, flags);
53
54 /* wait to be given the semaphore */
55 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
56
57 for (;;) {
58 if (!waiter.task)
59 break;
60 schedule();
61 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
62 }
63
64 tsk->state = TASK_RUNNING;
65 semtrace(sem, "Leaving __down");
66}
67EXPORT_SYMBOL(__down);
68
69/*
70 * interruptibly wait for a token to be granted from a semaphore
71 * - entered with lock held and interrupts disabled
72 */
73int __down_interruptible(struct semaphore *sem, unsigned long flags)
74{
75 struct task_struct *tsk = current;
76 struct sem_waiter waiter;
77 int ret;
78
79 semtrace(sem, "Entering __down_interruptible");
80
81 /* set up my own style of waitqueue */
82 waiter.task = tsk;
83 get_task_struct(tsk);
84
85 list_add_tail(&waiter.list, &sem->wait_list);
86
87 /* we don't need to touch the semaphore struct anymore */
88 set_task_state(tsk, TASK_INTERRUPTIBLE);
89
90 spin_unlock_irqrestore(&sem->wait_lock, flags);
91
92 /* wait to be given the semaphore */
93 ret = 0;
94 for (;;) {
95 if (!waiter.task)
96 break;
97 if (unlikely(signal_pending(current)))
98 goto interrupted;
99 schedule();
100 set_task_state(tsk, TASK_INTERRUPTIBLE);
101 }
102
103 out:
104 tsk->state = TASK_RUNNING;
105 semtrace(sem, "Leaving __down_interruptible");
106 return ret;
107
108 interrupted:
109 spin_lock_irqsave(&sem->wait_lock, flags);
110 list_del(&waiter.list);
111 spin_unlock_irqrestore(&sem->wait_lock, flags);
112
113 ret = 0;
114 if (!waiter.task) {
115 put_task_struct(current);
116 ret = -EINTR;
117 }
118 goto out;
119}
120EXPORT_SYMBOL(__down_interruptible);
121
122/*
123 * release a single token back to a semaphore
124 * - entered with lock held and interrupts disabled
125 */
126void __up(struct semaphore *sem)
127{
128 struct task_struct *tsk;
129 struct sem_waiter *waiter;
130
131 semtrace(sem, "Entering __up");
132
133 /* grant the token to the process at the front of the queue */
134 waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);
135
136 /* We must be careful not to touch 'waiter' after we set ->task = NULL.
137 * It is allocated on the waiter's stack and may become invalid at
138 * any time after that point (due to a wakeup from another source).
139 */
140 list_del_init(&waiter->list);
141 tsk = waiter->task;
142 smp_mb();
143 waiter->task = NULL;
144 wake_up_process(tsk);
145 put_task_struct(tsk);
146
147 semtrace(sem, "Leaving __up");
148}
149EXPORT_SYMBOL(__up);
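
The store waiter->task = NULL is the actual grant: the sleeper in __down() polls that field, and the sem_waiter lives on the sleeper's stack, so once the store lands the frame may vanish. A condensed sketch of the waker side of the hand-off, assuming the sem_waiter layout above:

/* waker side, wait_lock held, interrupts disabled */
struct sem_waiter *w = list_entry(sem->wait_list.next, struct sem_waiter, list);
struct task_struct *t = w->task;

list_del_init(&w->list);	/* unlink while w is still safe to touch */
smp_mb();			/* order the reads above before the grant... */
w->task = NULL;			/* ...because this store may free w's stack frame */
wake_up_process(t);		/* t is still pinned by get_task_struct() in __down() */
put_task_struct(t);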
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index 27827bc3717e..1f6585a56f97 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -9,7 +9,7 @@ AFLAGS_pacache.o := -traditional
 
 obj-y		:= cache.o pacache.o setup.o traps.o time.o irq.o \
 		   pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
-		   ptrace.o hardware.o inventory.o drivers.o semaphore.o \
+		   ptrace.o hardware.o inventory.o drivers.o \
 		   signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
 		   process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
 		   topology.o
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 7aca704e96f0..5b7fc4aa044d 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -69,11 +69,6 @@ EXPORT_SYMBOL(memcpy_toio);
 EXPORT_SYMBOL(memcpy_fromio);
 EXPORT_SYMBOL(memset_io);
 
-#include <asm/semaphore.h>
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down);
-
 extern void $$divI(void);
 extern void $$divU(void);
 extern void $$remI(void);
diff --git a/arch/parisc/kernel/semaphore.c b/arch/parisc/kernel/semaphore.c
deleted file mode 100644
index ee806bcc3726..000000000000
--- a/arch/parisc/kernel/semaphore.c
+++ /dev/null
@@ -1,102 +0,0 @@
1/*
2 * Semaphore implementation Copyright (c) 2001 Matthew Wilcox, Hewlett-Packard
3 */
4
5#include <linux/sched.h>
6#include <linux/spinlock.h>
7#include <linux/errno.h>
8#include <linux/init.h>
9
10/*
11 * Semaphores are complex as we wish to avoid using two variables.
12 * `count' has multiple roles, depending on its value. If it is positive
13 * or zero, there are no waiters. The functions here will never be
14 * called; see <asm/semaphore.h>
15 *
16 * When count is -1 it indicates there is at least one task waiting
17 * for the semaphore.
18 *
19 * When count is less than that, there are '- count - 1' wakeups
20 * pending, i.e. if it has value -3, there are 2 wakeups pending.
21 *
22 * Note that these functions are only called when there is contention
23 * on the lock, and as such all this is the "non-critical" part of the
24 * whole semaphore business. The critical part is the inline stuff in
25 * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
26 */
27void __up(struct semaphore *sem)
28{
29 sem->count--;
30 wake_up(&sem->wait);
31}
32
33#define wakers(count) (-1 - count)
34
35#define DOWN_HEAD \
36 int ret = 0; \
37 DECLARE_WAITQUEUE(wait, current); \
38 \
39 /* Note that someone is waiting */ \
40 if (sem->count == 0) \
41 sem->count = -1; \
42 \
43 /* protected by the sentry still -- use unlocked version */ \
44 wait.flags = WQ_FLAG_EXCLUSIVE; \
45 __add_wait_queue_tail(&sem->wait, &wait); \
46 lost_race: \
47 spin_unlock_irq(&sem->sentry); \
48
49#define DOWN_TAIL \
50 spin_lock_irq(&sem->sentry); \
51 if (wakers(sem->count) == 0 && ret == 0) \
52 goto lost_race; /* Someone stole our wakeup */ \
53 __remove_wait_queue(&sem->wait, &wait); \
54 current->state = TASK_RUNNING; \
55 if (!waitqueue_active(&sem->wait) && (sem->count < 0)) \
56 sem->count = wakers(sem->count);
57
58#define UPDATE_COUNT \
59 sem->count += (sem->count < 0) ? 1 : - 1;
60
61
62void __sched __down(struct semaphore * sem)
63{
64 DOWN_HEAD
65
66 for(;;) {
67 set_task_state(current, TASK_UNINTERRUPTIBLE);
68 /* we can _read_ this without the sentry */
69 if (sem->count != -1)
70 break;
71 schedule();
72 }
73
74 DOWN_TAIL
75 UPDATE_COUNT
76}
77
78int __sched __down_interruptible(struct semaphore * sem)
79{
80 DOWN_HEAD
81
82 for(;;) {
83 set_task_state(current, TASK_INTERRUPTIBLE);
84 /* we can _read_ this without the sentry */
85 if (sem->count != -1)
86 break;
87
88 if (signal_pending(current)) {
89 ret = -EINTR;
90 break;
91 }
92 schedule();
93 }
94
95 DOWN_TAIL
96
97 if (!ret) {
98 UPDATE_COUNT
99 }
100
101 return ret;
102}
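
Here the whole waiter/wakeup state is folded into count: zero or positive means no waiters, -1 means at least one task is waiting, and each further decrement by __up() banks one wakeup. A user-space check of the encoding (reusing the wakers() macro from above):

#include <assert.h>

#define wakers(count) (-1 - (count))

int main(void)
{
	assert(wakers(-1) == 0);	/* waiters present, no wakeup banked yet */
	assert(wakers(-2) == 1);	/* one up() arrived while all slept */
	assert(wakers(-3) == 2);

	/* UPDATE_COUNT after a successful __down(): consume one wakeup */
	int count = -3;
	count += (count < 0) ? 1 : -1;
	assert(count == -2 && wakers(count) == 1);
	return 0;
}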
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index c1baf9d5903f..b9dbfff9afe9 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -12,7 +12,7 @@ CFLAGS_prom_init.o += -fPIC
 CFLAGS_btext.o		+= -fPIC
 endif
 
-obj-y				:= semaphore.o cputable.o ptrace.o syscalls.o \
+obj-y				:= cputable.o ptrace.o syscalls.o \
 				   irq.o align.o signal_32.o pmc.o vdso.o \
 				   init_task.o process.o systbl.o idle.o \
 				   signal.o
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 9c98424277a8..65d14e6ddc3c 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -15,7 +15,6 @@
 #include <linux/bitops.h>
 
 #include <asm/page.h>
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/cacheflush.h>
 #include <asm/uaccess.h>
diff --git a/arch/powerpc/kernel/semaphore.c b/arch/powerpc/kernel/semaphore.c
deleted file mode 100644
index 2f8c3c951394..000000000000
--- a/arch/powerpc/kernel/semaphore.c
+++ /dev/null
@@ -1,135 +0,0 @@
1/*
2 * PowerPC-specific semaphore code.
3 *
4 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
12 * to eliminate the SMP races in the old version between the updates
13 * of `count' and `waking'. Now we use negative `count' values to
14 * indicate that some process(es) are waiting for the semaphore.
15 */
16
17#include <linux/sched.h>
18#include <linux/init.h>
19#include <linux/module.h>
20
21#include <asm/atomic.h>
22#include <asm/semaphore.h>
23#include <asm/errno.h>
24
25/*
26 * Atomically update sem->count.
27 * This does the equivalent of the following:
28 *
29 * old_count = sem->count;
30 * tmp = MAX(old_count, 0) + incr;
31 * sem->count = tmp;
32 * return old_count;
33 */
34static inline int __sem_update_count(struct semaphore *sem, int incr)
35{
36 int old_count, tmp;
37
38 __asm__ __volatile__("\n"
39"1: lwarx %0,0,%3\n"
40" srawi %1,%0,31\n"
41" andc %1,%0,%1\n"
42" add %1,%1,%4\n"
43 PPC405_ERR77(0,%3)
44" stwcx. %1,0,%3\n"
45" bne 1b"
46 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
47 : "r" (&sem->count), "r" (incr), "m" (sem->count)
48 : "cc");
49
50 return old_count;
51}
52
53void __up(struct semaphore *sem)
54{
55 /*
56 * Note that we incremented count in up() before we came here,
57 * but that was ineffective since the result was <= 0, and
58 * any negative value of count is equivalent to 0.
59 * This ends up setting count to 1, unless count is now > 0
60 * (i.e. because some other cpu has called up() in the meantime),
61 * in which case we just increment count.
62 */
63 __sem_update_count(sem, 1);
64 wake_up(&sem->wait);
65}
66EXPORT_SYMBOL(__up);
67
68/*
69 * Note that when we come in to __down or __down_interruptible,
70 * we have already decremented count, but that decrement was
71 * ineffective since the result was < 0, and any negative value
72 * of count is equivalent to 0.
73 * Thus it is only when we decrement count from some value > 0
74 * that we have actually got the semaphore.
75 */
76void __sched __down(struct semaphore *sem)
77{
78 struct task_struct *tsk = current;
79 DECLARE_WAITQUEUE(wait, tsk);
80
81 __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
82 add_wait_queue_exclusive(&sem->wait, &wait);
83
84 /*
85 * Try to get the semaphore. If the count is > 0, then we've
86 * got the semaphore; we decrement count and exit the loop.
87 * If the count is 0 or negative, we set it to -1, indicating
88 * that we are asleep, and then sleep.
89 */
90 while (__sem_update_count(sem, -1) <= 0) {
91 schedule();
92 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
93 }
94 remove_wait_queue(&sem->wait, &wait);
95 __set_task_state(tsk, TASK_RUNNING);
96
97 /*
98 * If there are any more sleepers, wake one of them up so
99 * that it can either get the semaphore, or set count to -1
100 * indicating that there are still processes sleeping.
101 */
102 wake_up(&sem->wait);
103}
104EXPORT_SYMBOL(__down);
105
106int __sched __down_interruptible(struct semaphore * sem)
107{
108 int retval = 0;
109 struct task_struct *tsk = current;
110 DECLARE_WAITQUEUE(wait, tsk);
111
112 __set_task_state(tsk, TASK_INTERRUPTIBLE);
113 add_wait_queue_exclusive(&sem->wait, &wait);
114
115 while (__sem_update_count(sem, -1) <= 0) {
116 if (signal_pending(current)) {
117 /*
118 * A signal is pending - give up trying.
119 * Set sem->count to 0 if it is negative,
120 * since we are no longer sleeping.
121 */
122 __sem_update_count(sem, 0);
123 retval = -EINTR;
124 break;
125 }
126 schedule();
127 set_task_state(tsk, TASK_INTERRUPTIBLE);
128 }
129 remove_wait_queue(&sem->wait, &wait);
130 __set_task_state(tsk, TASK_RUNNING);
131
132 wake_up(&sem->wait);
133 return retval;
134}
135EXPORT_SYMBOL(__down_interruptible);
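
For readers who do not read lwarx/stwcx., the update loop is load-linked/store-conditional doing exactly the pseudocode in the comment above. The same thing written against C11 atomics (a user-space sketch, not the kernel primitive):

#include <stdatomic.h>

/* count = max(count, 0) + incr, returning the old value */
static int sem_update_count(atomic_int *count, int incr)
{
	int old = atomic_load(count);

	/* on failure, 'old' is reloaded with the current value, so the
	 * clamped sum is recomputed before every retry */
	while (!atomic_compare_exchange_weak(count, &old,
					     (old > 0 ? old : 0) + incr))
		;
	return old;
}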
diff --git a/arch/ppc/kernel/semaphore.c b/arch/ppc/kernel/semaphore.c
deleted file mode 100644
index 2fe429b27c14..000000000000
--- a/arch/ppc/kernel/semaphore.c
+++ /dev/null
@@ -1,131 +0,0 @@
1/*
2 * PowerPC-specific semaphore code.
3 *
4 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
12 * to eliminate the SMP races in the old version between the updates
13 * of `count' and `waking'. Now we use negative `count' values to
14 * indicate that some process(es) are waiting for the semaphore.
15 */
16
17#include <linux/sched.h>
18#include <linux/init.h>
19#include <asm/atomic.h>
20#include <asm/semaphore.h>
21#include <asm/errno.h>
22
23/*
24 * Atomically update sem->count.
25 * This does the equivalent of the following:
26 *
27 * old_count = sem->count;
28 * tmp = MAX(old_count, 0) + incr;
29 * sem->count = tmp;
30 * return old_count;
31 */
32static inline int __sem_update_count(struct semaphore *sem, int incr)
33{
34 int old_count, tmp;
35
36 __asm__ __volatile__("\n"
37"1: lwarx %0,0,%3\n"
38" srawi %1,%0,31\n"
39" andc %1,%0,%1\n"
40" add %1,%1,%4\n"
41 PPC405_ERR77(0,%3)
42" stwcx. %1,0,%3\n"
43" bne 1b"
44 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
45 : "r" (&sem->count), "r" (incr), "m" (sem->count)
46 : "cc");
47
48 return old_count;
49}
50
51void __up(struct semaphore *sem)
52{
53 /*
54 * Note that we incremented count in up() before we came here,
55 * but that was ineffective since the result was <= 0, and
56 * any negative value of count is equivalent to 0.
57 * This ends up setting count to 1, unless count is now > 0
58 * (i.e. because some other cpu has called up() in the meantime),
59 * in which case we just increment count.
60 */
61 __sem_update_count(sem, 1);
62 wake_up(&sem->wait);
63}
64
65/*
66 * Note that when we come in to __down or __down_interruptible,
67 * we have already decremented count, but that decrement was
68 * ineffective since the result was < 0, and any negative value
69 * of count is equivalent to 0.
70 * Thus it is only when we decrement count from some value > 0
71 * that we have actually got the semaphore.
72 */
73void __sched __down(struct semaphore *sem)
74{
75 struct task_struct *tsk = current;
76 DECLARE_WAITQUEUE(wait, tsk);
77
78 tsk->state = TASK_UNINTERRUPTIBLE;
79 add_wait_queue_exclusive(&sem->wait, &wait);
80 smp_wmb();
81
82 /*
83 * Try to get the semaphore. If the count is > 0, then we've
84 * got the semaphore; we decrement count and exit the loop.
85 * If the count is 0 or negative, we set it to -1, indicating
86 * that we are asleep, and then sleep.
87 */
88 while (__sem_update_count(sem, -1) <= 0) {
89 schedule();
90 tsk->state = TASK_UNINTERRUPTIBLE;
91 }
92 remove_wait_queue(&sem->wait, &wait);
93 tsk->state = TASK_RUNNING;
94
95 /*
96 * If there are any more sleepers, wake one of them up so
97 * that it can either get the semaphore, or set count to -1
98 * indicating that there are still processes sleeping.
99 */
100 wake_up(&sem->wait);
101}
102
103int __sched __down_interruptible(struct semaphore * sem)
104{
105 int retval = 0;
106 struct task_struct *tsk = current;
107 DECLARE_WAITQUEUE(wait, tsk);
108
109 tsk->state = TASK_INTERRUPTIBLE;
110 add_wait_queue_exclusive(&sem->wait, &wait);
111 smp_wmb();
112
113 while (__sem_update_count(sem, -1) <= 0) {
114 if (signal_pending(current)) {
115 /*
116 * A signal is pending - give up trying.
117 * Set sem->count to 0 if it is negative,
118 * since we are no longer sleeping.
119 */
120 __sem_update_count(sem, 0);
121 retval = -EINTR;
122 break;
123 }
124 schedule();
125 tsk->state = TASK_INTERRUPTIBLE;
126 }
127 tsk->state = TASK_RUNNING;
128 remove_wait_queue(&sem->wait, &wait);
129 wake_up(&sem->wait);
130 return retval;
131}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1831833c430e..f6a68e178fc5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -3,6 +3,10 @@
 # see Documentation/kbuild/kconfig-language.txt.
 #
 
+config SCHED_MC
+	def_bool y
+	depends on SMP
+
 config MMU
 	def_bool y
 
@@ -39,6 +43,9 @@ config GENERIC_HWEIGHT
 config GENERIC_TIME
 	def_bool y
 
+config GENERIC_CLOCKEVENTS
+	def_bool y
+
 config GENERIC_BUG
 	bool
 	depends on BUG
@@ -69,6 +76,8 @@ menu "Base setup"
 
 comment "Processor type and features"
 
+source "kernel/time/Kconfig"
+
 config 64BIT
 	bool "64 bit kernel"
 	help
@@ -301,10 +310,7 @@ config QDIO
 	tristate "QDIO support"
 	---help---
 	  This driver provides the Queued Direct I/O base support for
-	  IBM mainframes.
-
-	  For details please refer to the documentation provided by IBM at
-	  <http://www10.software.ibm.com/developerworks/opensource/linux390>
+	  IBM System z.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called qdio.
@@ -486,25 +492,6 @@ config APPLDATA_NET_SUM
 
 source kernel/Kconfig.hz
 
-config NO_IDLE_HZ
-	bool "No HZ timer ticks in idle"
-	help
-	  Switches the regular HZ timer off when the system is going idle.
-	  This helps z/VM to detect that the Linux system is idle. VM can
-	  then "swap-out" this guest which reduces memory usage. It also
-	  reduces the overhead of idle systems.
-
-	  The HZ timer can be switched on/off via /proc/sys/kernel/hz_timer.
-	  hz_timer=0 means HZ timer is disabled. hz_timer=1 means HZ
-	  timer is active.
-
-config NO_IDLE_HZ_INIT
-	bool "HZ timer in idle off by default"
-	depends on NO_IDLE_HZ
-	help
-	  The HZ timer is switched off in idle by default. That means the
-	  HZ timer is already disabled at boot time.
-
 config S390_HYPFS_FS
 	bool "s390 hypervisor file system support"
 	select SYS_HYPERVISOR
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index a3f67f8b5427..e33f32b54c08 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -499,7 +499,7 @@ static struct crypto_alg cbc_aes_alg = {
 	}
 };
 
-static int __init aes_init(void)
+static int __init aes_s390_init(void)
 {
 	int ret;
 
@@ -542,15 +542,15 @@ aes_err:
 	goto out;
 }
 
-static void __exit aes_fini(void)
+static void __exit aes_s390_fini(void)
 {
 	crypto_unregister_alg(&cbc_aes_alg);
 	crypto_unregister_alg(&ecb_aes_alg);
 	crypto_unregister_alg(&aes_alg);
 }
 
-module_init(aes_init);
-module_exit(aes_fini);
+module_init(aes_s390_init);
+module_exit(aes_s390_fini);
 
 MODULE_ALIAS("aes");
 
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index ea22707f435f..4aba83b31596 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -550,7 +550,7 @@ static struct crypto_alg cbc_des3_192_alg = {
 	}
 };
 
-static int des_s390_init(void)
+static int des_s390_init(void)
 {
 	int ret = 0;
 
@@ -612,7 +612,7 @@ des_err:
 	goto out;
 }
 
-static void __exit fini(void)
+static void __exit des_s390_fini(void)
 {
 	crypto_unregister_alg(&cbc_des3_192_alg);
 	crypto_unregister_alg(&ecb_des3_192_alg);
@@ -625,8 +625,8 @@ static void __exit fini(void)
 	crypto_unregister_alg(&des_alg);
 }
 
-module_init(init);
-module_exit(fini);
+module_init(des_s390_init);
+module_exit(des_s390_fini);
 
 MODULE_ALIAS("des");
 MODULE_ALIAS("des3_ede");
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 5a834f6578ab..9cf9eca22747 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -137,7 +137,7 @@ static struct crypto_alg alg = {
 		.dia_final = sha1_final } }
 };
 
-static int __init init(void)
+static int __init sha1_s390_init(void)
 {
 	if (!crypt_s390_func_available(KIMD_SHA_1))
 		return -EOPNOTSUPP;
@@ -145,13 +145,13 @@ static int __init init(void)
 	return crypto_register_alg(&alg);
 }
 
-static void __exit fini(void)
+static void __exit sha1_s390_fini(void)
 {
 	crypto_unregister_alg(&alg);
 }
 
-module_init(init);
-module_exit(fini);
+module_init(sha1_s390_init);
+module_exit(sha1_s390_fini);
 
 MODULE_ALIAS("sha1");
 
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index ccf8633c4f65..2a3d756b35d4 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -133,7 +133,7 @@ static struct crypto_alg alg = {
133 .dia_final = sha256_final } } 133 .dia_final = sha256_final } }
134}; 134};
135 135
136static int init(void) 136static int sha256_s390_init(void)
137{ 137{
138 if (!crypt_s390_func_available(KIMD_SHA_256)) 138 if (!crypt_s390_func_available(KIMD_SHA_256))
139 return -EOPNOTSUPP; 139 return -EOPNOTSUPP;
@@ -141,13 +141,13 @@ static int init(void)
141 return crypto_register_alg(&alg); 141 return crypto_register_alg(&alg);
142} 142}
143 143
144static void __exit fini(void) 144static void __exit sha256_s390_fini(void)
145{ 145{
146 crypto_unregister_alg(&alg); 146 crypto_unregister_alg(&alg);
147} 147}
148 148
149module_init(init); 149module_init(sha256_s390_init);
150module_exit(fini); 150module_exit(sha256_s390_fini);
151 151
152MODULE_ALIAS("sha256"); 152MODULE_ALIAS("sha256");
153 153
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 62f6b5a606dd..dcc3ec2ef643 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -3,6 +3,7 @@
3# Linux kernel version: 2.6.25-rc4 3# Linux kernel version: 2.6.25-rc4
4# Wed Mar 5 11:22:59 2008 4# Wed Mar 5 11:22:59 2008
5# 5#
6CONFIG_SCHED_MC=y
6CONFIG_MMU=y 7CONFIG_MMU=y
7CONFIG_ZONE_DMA=y 8CONFIG_ZONE_DMA=y
8CONFIG_LOCKDEP_SUPPORT=y 9CONFIG_LOCKDEP_SUPPORT=y
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 4d3e38392cb1..77051cd27925 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -11,7 +11,7 @@ CFLAGS_smp.o := -Wno-nonnull
11 11
12obj-y := bitmap.o traps.o time.o process.o base.o early.o \ 12obj-y := bitmap.o traps.o time.o process.o base.o early.o \
13 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 13 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
14 semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o diag.o 14 s390_ext.o debug.o irq.o ipl.o dis.o diag.o
15 15
16obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 16obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
17obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 17obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
@@ -19,7 +19,7 @@ obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
19extra-y += head.o init_task.o vmlinux.lds 19extra-y += head.o init_task.o vmlinux.lds
20 20
21obj-$(CONFIG_MODULES) += s390_ksyms.o module.o 21obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
22obj-$(CONFIG_SMP) += smp.o 22obj-$(CONFIG_SMP) += smp.o topology.o
23 23
24obj-$(CONFIG_AUDIT) += audit.o 24obj-$(CONFIG_AUDIT) += audit.o
25compat-obj-$(CONFIG_AUDIT) += compat_audit.o 25compat-obj-$(CONFIG_AUDIT) += compat_audit.o
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index e89f8c0c42a0..20723a062017 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -162,4 +162,77 @@ struct ucontext32 {
162 compat_sigset_t uc_sigmask; /* mask last for extensibility */ 162 compat_sigset_t uc_sigmask; /* mask last for extensibility */
163}; 163};
164 164
165struct __sysctl_args32;
166struct stat64_emu31;
167struct mmap_arg_struct_emu31;
168struct fadvise64_64_args;
169struct old_sigaction32;
170struct old_sigaction32;
171
172long sys32_chown16(const char __user * filename, u16 user, u16 group);
173long sys32_lchown16(const char __user * filename, u16 user, u16 group);
174long sys32_fchown16(unsigned int fd, u16 user, u16 group);
175long sys32_setregid16(u16 rgid, u16 egid);
176long sys32_setgid16(u16 gid);
177long sys32_setreuid16(u16 ruid, u16 euid);
178long sys32_setuid16(u16 uid);
179long sys32_setresuid16(u16 ruid, u16 euid, u16 suid);
180long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid);
181long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid);
182long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid);
183long sys32_setfsuid16(u16 uid);
184long sys32_setfsgid16(u16 gid);
185long sys32_getgroups16(int gidsetsize, u16 __user *grouplist);
186long sys32_setgroups16(int gidsetsize, u16 __user *grouplist);
187long sys32_getuid16(void);
188long sys32_geteuid16(void);
189long sys32_getgid16(void);
190long sys32_getegid16(void);
191long sys32_ipc(u32 call, int first, int second, int third, u32 ptr);
192long sys32_truncate64(const char __user * path, unsigned long high,
193 unsigned long low);
194long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low);
195long sys32_sched_rr_get_interval(compat_pid_t pid,
196 struct compat_timespec __user *interval);
197long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
198 compat_sigset_t __user *oset, size_t sigsetsize);
199long sys32_rt_sigpending(compat_sigset_t __user *set, size_t sigsetsize);
200long sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo);
201long sys32_execve(void);
202long sys32_init_module(void __user *umod, unsigned long len,
203 const char __user *uargs);
204long sys32_delete_module(const char __user *name_user, unsigned int flags);
205long sys32_gettimeofday(struct compat_timeval __user *tv,
206 struct timezone __user *tz);
207long sys32_settimeofday(struct compat_timeval __user *tv,
208 struct timezone __user *tz);
209long sys32_pause(void);
210long sys32_pread64(unsigned int fd, char __user *ubuf, size_t count,
211 u32 poshi, u32 poslo);
212long sys32_pwrite64(unsigned int fd, const char __user *ubuf,
213 size_t count, u32 poshi, u32 poslo);
214compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count);
215long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset,
216 size_t count);
217long sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset,
218 s32 count);
219long sys32_sysctl(struct __sysctl_args32 __user *args);
220long sys32_stat64(char __user * filename, struct stat64_emu31 __user * statbuf);
221long sys32_lstat64(char __user * filename,
222 struct stat64_emu31 __user * statbuf);
223long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf);
224long sys32_fstatat64(unsigned int dfd, char __user *filename,
225 struct stat64_emu31 __user* statbuf, int flag);
226unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg);
227long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg);
228long sys32_read(unsigned int fd, char __user * buf, size_t count);
229long sys32_write(unsigned int fd, char __user * buf, size_t count);
230long sys32_clone(void);
231long sys32_fadvise64(int fd, loff_t offset, size_t len, int advise);
232long sys32_fadvise64_64(struct fadvise64_64_args __user *args);
233long sys32_sigaction(int sig, const struct old_sigaction32 __user *act,
234 struct old_sigaction32 __user *oact);
235long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
236 struct sigaction32 __user *oact, size_t sigsetsize);
237long sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss);
165#endif /* _ASM_S390X_S390_H */ 238#endif /* _ASM_S390X_S390_H */
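These declarations give every 31-bit emulation wrapper defined in compat_linux.c (and the signal wrappers in compat_signal.c) a prototype that is visible at the definition site, so the compiler can check the definition against the declaration and stop warning about missing prototypes. The forward struct declarations at the top keep the header free of extra includes: an incomplete type is enough for a pointer parameter. A compressed sketch of the idiom, with hypothetical names:

/* foo.h - forward declaration plus prototype, no extra includes */
struct foo_args;
long sys32_foo(struct foo_args __user *args);

/* foo.c - including the header makes the definition checkable */
#include "foo.h"
long sys32_foo(struct foo_args __user *args)
{
	return 0;
}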
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index a5692c460bad..c7f02e777af2 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -29,6 +29,7 @@
29#include <asm/lowcore.h> 29#include <asm/lowcore.h>
30#include "compat_linux.h" 30#include "compat_linux.h"
31#include "compat_ptrace.h" 31#include "compat_ptrace.h"
32#include "entry.h"
32 33
33#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 34#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
34 35
@@ -428,6 +429,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
428 /* Default to using normal stack */ 429 /* Default to using normal stack */
429 sp = (unsigned long) A(regs->gprs[15]); 430 sp = (unsigned long) A(regs->gprs[15]);
430 431
432 /* Overflow on alternate signal stack gives SIGSEGV. */
433 if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
434 return (void __user *) -1UL;
435
431 /* This is the X/Open sanctioned signal stack switching. */ 436 /* This is the X/Open sanctioned signal stack switching. */
432 if (ka->sa.sa_flags & SA_ONSTACK) { 437 if (ka->sa.sa_flags & SA_ONSTACK) {
433 if (! sas_ss_flags(sp)) 438 if (! sas_ss_flags(sp))
@@ -461,6 +466,9 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
461 if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32))) 466 if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32)))
462 goto give_sigsegv; 467 goto give_sigsegv;
463 468
469 if (frame == (void __user *) -1UL)
470 goto give_sigsegv;
471
464 if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32)) 472 if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32))
465 goto give_sigsegv; 473 goto give_sigsegv;
466 474
@@ -514,6 +522,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
514 if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32))) 522 if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32)))
515 goto give_sigsegv; 523 goto give_sigsegv;
516 524
525 if (frame == (void __user *) -1UL)
526 goto give_sigsegv;
527
517 if (copy_siginfo_to_user32(&frame->info, info)) 528 if (copy_siginfo_to_user32(&frame->info, info))
518 goto give_sigsegv; 529 goto give_sigsegv;
519 530
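The hunks above close the same hole in both compat signal-frame builders: if the stack pointer already sits on the alternate signal stack but the new frame would extend below its base, get_sigframe() now returns -1UL and the callers turn that into SIGSEGV instead of silently scribbling under the sigaltstack area. With made-up numbers, assuming an alternate stack covering [0x1000, 0x3000):

/* sp = 0x1100 is on the stack, but the 0x200-byte frame is not:
 * (0x1100 - 0x200) & -8UL == 0xf00, below the base, so bail out. */
if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
	return (void __user *) -1UL;	/* caller: goto give_sigsegv */

The & -8UL simply rounds the candidate frame address down to the 8-byte alignment at which the frame will actually be written.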
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 1b2f5ce45320..1e7d4ac7068b 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -73,7 +73,7 @@ static ssize_t debug_input(struct file *file, const char __user *user_buf,
73static int debug_open(struct inode *inode, struct file *file); 73static int debug_open(struct inode *inode, struct file *file);
74static int debug_close(struct inode *inode, struct file *file); 74static int debug_close(struct inode *inode, struct file *file);
75static debug_info_t* debug_info_create(char *name, int pages_per_area, 75static debug_info_t* debug_info_create(char *name, int pages_per_area,
76 int nr_areas, int buf_size); 76 int nr_areas, int buf_size, mode_t mode);
77static void debug_info_get(debug_info_t *); 77static void debug_info_get(debug_info_t *);
78static void debug_info_put(debug_info_t *); 78static void debug_info_put(debug_info_t *);
79static int debug_prolog_level_fn(debug_info_t * id, 79static int debug_prolog_level_fn(debug_info_t * id,
@@ -157,7 +157,7 @@ struct debug_view debug_sprintf_view = {
157}; 157};
158 158
159/* used by dump analysis tools to determine version of debug feature */ 159/* used by dump analysis tools to determine version of debug feature */
160unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION; 160static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION;
161 161
162/* static globals */ 162/* static globals */
163 163
@@ -327,7 +327,8 @@ debug_info_free(debug_info_t* db_info){
327 */ 327 */
328 328
329static debug_info_t* 329static debug_info_t*
330debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size) 330debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size,
331 mode_t mode)
331{ 332{
332 debug_info_t* rc; 333 debug_info_t* rc;
333 334
@@ -336,6 +337,8 @@ debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size)
336 if(!rc) 337 if(!rc)
337 goto out; 338 goto out;
338 339
340 rc->mode = mode & ~S_IFMT;
341
339 /* create root directory */ 342 /* create root directory */
340 rc->debugfs_root_entry = debugfs_create_dir(rc->name, 343 rc->debugfs_root_entry = debugfs_create_dir(rc->name,
341 debug_debugfs_root_entry); 344 debug_debugfs_root_entry);
@@ -676,23 +679,30 @@ debug_close(struct inode *inode, struct file *file)
676} 679}
677 680
678/* 681/*
679 * debug_register: 682 * debug_register_mode:
680 * - creates and initializes debug area for the caller 683 * - Creates and initializes debug area for the caller
681 * - returns handle for debug area 684 * The mode parameter allows specifying access rights for the s390dbf files
685 * - Returns handle for debug area
682 */ 686 */
683 687
684debug_info_t* 688debug_info_t *debug_register_mode(char *name, int pages_per_area, int nr_areas,
685debug_register (char *name, int pages_per_area, int nr_areas, int buf_size) 689 int buf_size, mode_t mode, uid_t uid,
690 gid_t gid)
686{ 691{
687 debug_info_t *rc = NULL; 692 debug_info_t *rc = NULL;
688 693
694 /* Since debugfs currently does not support uid/gid other than root, */
695 /* we do not allow gid/uid != 0 until we get support for that. */
696 if ((uid != 0) || (gid != 0))
697 printk(KERN_WARNING "debug: Warning - Currently only uid/gid "
698 "= 0 are supported. Using root as owner now!\n");
689 if (!initialized) 699 if (!initialized)
690 BUG(); 700 BUG();
691 mutex_lock(&debug_mutex); 701 mutex_lock(&debug_mutex);
692 702
693 /* create new debug_info */ 703 /* create new debug_info */
694 704
695 rc = debug_info_create(name, pages_per_area, nr_areas, buf_size); 705 rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
696 if(!rc) 706 if(!rc)
697 goto out; 707 goto out;
698 debug_register_view(rc, &debug_level_view); 708 debug_register_view(rc, &debug_level_view);
@@ -705,6 +715,20 @@ out:
705 mutex_unlock(&debug_mutex); 715 mutex_unlock(&debug_mutex);
706 return rc; 716 return rc;
707} 717}
718EXPORT_SYMBOL(debug_register_mode);
719
720/*
721 * debug_register:
722 * - creates and initializes debug area for the caller
723 * - returns handle for debug area
724 */
725
726debug_info_t *debug_register(char *name, int pages_per_area, int nr_areas,
727 int buf_size)
728{
729 return debug_register_mode(name, pages_per_area, nr_areas, buf_size,
730 S_IRUSR | S_IWUSR, 0, 0);
731}
708 732
709/* 733/*
710 * debug_unregister: 734 * debug_unregister:
@@ -1073,15 +1097,16 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
1073 int rc = 0; 1097 int rc = 0;
1074 int i; 1098 int i;
1075 unsigned long flags; 1099 unsigned long flags;
1076 mode_t mode = S_IFREG; 1100 mode_t mode;
1077 struct dentry *pde; 1101 struct dentry *pde;
1078 1102
1079 if (!id) 1103 if (!id)
1080 goto out; 1104 goto out;
1081 if (view->prolog_proc || view->format_proc || view->header_proc) 1105 mode = (id->mode | S_IFREG) & ~S_IXUGO;
1082 mode |= S_IRUSR; 1106 if (!(view->prolog_proc || view->format_proc || view->header_proc))
1083 if (view->input_proc) 1107 mode &= ~(S_IRUSR | S_IRGRP | S_IROTH);
1084 mode |= S_IWUSR; 1108 if (!view->input_proc)
1109 mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
1085 pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry, 1110 pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry,
1086 id , &debug_file_ops); 1111 id , &debug_file_ops);
1087 if (!pde){ 1112 if (!pde){
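debug_register_mode() lets a device driver pick the permission bits of its s390dbf files at registration time, while the plain debug_register() keeps its old behaviour as a thin wrapper (S_IRUSR | S_IWUSR, owner root). A hypothetical caller, using only what the new signature above provides:

/* Sketch: group-readable debug area; uid/gid must stay 0 for now,
 * as the warning in debug_register_mode() points out. */
static debug_info_t *foo_dbf;

static int __init foo_init(void)
{
	foo_dbf = debug_register_mode("foo", 4 /* pages per area */,
				      1 /* nr_areas */, 4 * sizeof(long),
				      S_IRUSR | S_IWUSR | S_IRGRP, 0, 0);
	if (!foo_dbf)
		return -ENOMEM;
	debug_register_view(foo_dbf, &debug_sprintf_view);
	return 0;
}

Note how debug_register_view() then derives each file's mode from id->mode: execute bits are always masked out, read bits are dropped for views without output callbacks, and write bits for views without an input callback.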
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 01832c440636..540a67f979b6 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -21,6 +21,7 @@
21#include <asm/setup.h> 21#include <asm/setup.h>
22#include <asm/cpcmd.h> 22#include <asm/cpcmd.h>
23#include <asm/sclp.h> 23#include <asm/sclp.h>
24#include "entry.h"
24 25
25/* 26/*
26 * Create a Kernel NSS if the SAVESYS= parameter is defined 27 * Create a Kernel NSS if the SAVESYS= parameter is defined
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
new file mode 100644
index 000000000000..6b1896345eda
--- /dev/null
+++ b/arch/s390/kernel/entry.h
@@ -0,0 +1,60 @@
1#ifndef _ENTRY_H
2#define _ENTRY_H
3
4#include <linux/types.h>
5#include <linux/signal.h>
6#include <asm/ptrace.h>
7
8typedef void pgm_check_handler_t(struct pt_regs *, long);
9extern pgm_check_handler_t *pgm_check_table[128];
10pgm_check_handler_t do_protection_exception;
11pgm_check_handler_t do_dat_exception;
12
13extern int sysctl_userprocess_debug;
14
15void do_single_step(struct pt_regs *regs);
16void syscall_trace(struct pt_regs *regs, int entryexit);
17void kernel_stack_overflow(struct pt_regs * regs);
18void do_signal(struct pt_regs *regs);
19int handle_signal32(unsigned long sig, struct k_sigaction *ka,
20 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
21
22void do_extint(struct pt_regs *regs, unsigned short code);
23int __cpuinit start_secondary(void *cpuvoid);
24void __init startup_init(void);
25void die(const char * str, struct pt_regs * regs, long err);
26
27struct new_utsname;
28struct mmap_arg_struct;
29struct fadvise64_64_args;
30struct old_sigaction;
31struct sel_arg_struct;
32
33long sys_pipe(unsigned long __user *fildes);
34long sys_mmap2(struct mmap_arg_struct __user *arg);
35long old_mmap(struct mmap_arg_struct __user *arg);
36long sys_ipc(uint call, int first, unsigned long second,
37 unsigned long third, void __user *ptr);
38long s390x_newuname(struct new_utsname __user *name);
39long s390x_personality(unsigned long personality);
40long s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
41 size_t len, int advice);
42long s390_fadvise64_64(struct fadvise64_64_args __user *args);
43long s390_fallocate(int fd, int mode, loff_t offset, u32 len_high, u32 len_low);
44long sys_fork(void);
45long sys_clone(void);
46long sys_vfork(void);
47void execve_tail(void);
48long sys_execve(void);
49int sys_sigsuspend(int history0, int history1, old_sigset_t mask);
50long sys_sigaction(int sig, const struct old_sigaction __user *act,
51 struct old_sigaction __user *oact);
52long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss);
53long sys_sigreturn(void);
54long sys_rt_sigreturn(void);
55long sys32_sigreturn(void);
56long sys32_rt_sigreturn(void);
57long old_select(struct sel_arg_struct __user *arg);
58long sys_ptrace(long request, long pid, long addr, long data);
59
60#endif /* _ENTRY_H */
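Collecting these prototypes in one private header lets traps.c, signal.c, ptrace.c and friends drop their ad-hoc extern declarations (see the removal in do_signal() further down) and gives sparse a single point of truth. The pgm_check_table declared here is consumed by the low-level entry code in assembler; a C rendering of that dispatch would look roughly like this (sketch, assuming only the table convention shown above):

/* The low 7 bits of the program interruption code select one of the
 * 128 handlers; unassigned slots point at a default handler. */
static void dispatch_pgm_check(struct pt_regs *regs, long pgm_int_code)
{
	pgm_check_handler_t *handler;

	handler = pgm_check_table[pgm_int_code & 0x7f];
	handler(regs, pgm_int_code);
}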
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index efde6e178f6c..cd959c0b2e16 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -475,6 +475,7 @@ pgm_check_handler:
475pgm_no_vtime: 475pgm_no_vtime:
476#endif 476#endif
477 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 477 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
478 mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
478 TRACE_IRQS_OFF 479 TRACE_IRQS_OFF
479 lgf %r3,__LC_PGM_ILC # load program interruption code 480 lgf %r3,__LC_PGM_ILC # load program interruption code
480 lghi %r8,0x7f 481 lghi %r8,0x7f
@@ -847,6 +848,7 @@ stack_overflow:
847 je 0f 848 je 0f
848 la %r1,__LC_SAVE_AREA+32 849 la %r1,__LC_SAVE_AREA+32
8490: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack 8500: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack
851 mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
850 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain 852 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
851 la %r2,SP_PTREGS(%r15) # load pt_regs 853 la %r2,SP_PTREGS(%r15) # load pt_regs
852 jg kernel_stack_overflow 854 jg kernel_stack_overflow
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 375232c46c7a..532542447d66 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -655,7 +655,7 @@ static struct kobj_attribute reipl_type_attr =
655 655
656static struct kset *reipl_kset; 656static struct kset *reipl_kset;
657 657
658void reipl_run(struct shutdown_trigger *trigger) 658static void reipl_run(struct shutdown_trigger *trigger)
659{ 659{
660 struct ccw_dev_id devid; 660 struct ccw_dev_id devid;
661 static char buf[100]; 661 static char buf[100];
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index c5549a206284..ed04d1372d5d 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -360,7 +360,7 @@ no_kprobe:
360 * - When the probed function returns, this probe 360 * - When the probed function returns, this probe
361 * causes the handlers to fire 361 * causes the handlers to fire
362 */ 362 */
363void kretprobe_trampoline_holder(void) 363static void __used kretprobe_trampoline_holder(void)
364{ 364{
365 asm volatile(".global kretprobe_trampoline\n" 365 asm volatile(".global kretprobe_trampoline\n"
366 "kretprobe_trampoline: bcr 0,0\n"); 366 "kretprobe_trampoline: bcr 0,0\n");
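Making the trampoline holder static is what the "defined but not used" analysis needs to see, but its only reference is the kretprobe_trampoline symbol defined inside its asm body, which the optimizer cannot see; __used (shorthand for __attribute__((used))) forces the function to be emitted anyway. The idiom in general form, with a hypothetical symbol name:

/* Keep alive a static function whose only users reference the asm
 * label it defines; without __used, gcc may discard it entirely. */
static void __used asm_label_holder(void)
{
	asm volatile(".global my_asm_label\n"
		     "my_asm_label: bcr 0,0\n");	/* s390 nop */
}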
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index ce203154d8ce..c1aff194141d 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -36,6 +36,8 @@
36#include <linux/module.h> 36#include <linux/module.h>
37#include <linux/notifier.h> 37#include <linux/notifier.h>
38#include <linux/utsname.h> 38#include <linux/utsname.h>
39#include <linux/tick.h>
40#include <linux/elfcore.h>
39#include <asm/uaccess.h> 41#include <asm/uaccess.h>
40#include <asm/pgtable.h> 42#include <asm/pgtable.h>
41#include <asm/system.h> 43#include <asm/system.h>
@@ -44,6 +46,7 @@
44#include <asm/irq.h> 46#include <asm/irq.h>
45#include <asm/timer.h> 47#include <asm/timer.h>
46#include <asm/cpu.h> 48#include <asm/cpu.h>
49#include "entry.h"
47 50
48asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); 51asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
49 52
@@ -76,6 +79,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
76 * Need to know about CPUs going idle? 79 * Need to know about CPUs going idle?
77 */ 80 */
78static ATOMIC_NOTIFIER_HEAD(idle_chain); 81static ATOMIC_NOTIFIER_HEAD(idle_chain);
82DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
79 83
80int register_idle_notifier(struct notifier_block *nb) 84int register_idle_notifier(struct notifier_block *nb)
81{ 85{
@@ -89,9 +93,33 @@ int unregister_idle_notifier(struct notifier_block *nb)
89} 93}
90EXPORT_SYMBOL(unregister_idle_notifier); 94EXPORT_SYMBOL(unregister_idle_notifier);
91 95
92void do_monitor_call(struct pt_regs *regs, long interruption_code) 96static int s390_idle_enter(void)
97{
98 struct s390_idle_data *idle;
99 int nr_calls = 0;
100 void *hcpu;
101 int rc;
102
103 hcpu = (void *)(long)smp_processor_id();
104 rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
105 &nr_calls);
106 if (rc == NOTIFY_BAD) {
107 nr_calls--;
108 __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
109 hcpu, nr_calls, NULL);
110 return rc;
111 }
112 idle = &__get_cpu_var(s390_idle);
113 spin_lock(&idle->lock);
114 idle->idle_count++;
115 idle->in_idle = 1;
116 idle->idle_enter = get_clock();
117 spin_unlock(&idle->lock);
118 return NOTIFY_OK;
119}
120
121void s390_idle_leave(void)
93{ 122{
94#ifdef CONFIG_SMP
95 struct s390_idle_data *idle; 123 struct s390_idle_data *idle;
96 124
97 idle = &__get_cpu_var(s390_idle); 125 idle = &__get_cpu_var(s390_idle);
@@ -99,10 +127,6 @@ void do_monitor_call(struct pt_regs *regs, long interruption_code)
99 idle->idle_time += get_clock() - idle->idle_enter; 127 idle->idle_time += get_clock() - idle->idle_enter;
100 idle->in_idle = 0; 128 idle->in_idle = 0;
101 spin_unlock(&idle->lock); 129 spin_unlock(&idle->lock);
102#endif
103 /* disable monitor call class 0 */
104 __ctl_clear_bit(8, 15);
105
106 atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, 130 atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
107 (void *)(long) smp_processor_id()); 131 (void *)(long) smp_processor_id());
108} 132}
@@ -113,61 +137,30 @@ extern void s390_handle_mcck(void);
113 */ 137 */
114static void default_idle(void) 138static void default_idle(void)
115{ 139{
116 int cpu, rc;
117 int nr_calls = 0;
118 void *hcpu;
119#ifdef CONFIG_SMP
120 struct s390_idle_data *idle;
121#endif
122
123 /* CPU is going idle. */ 140 /* CPU is going idle. */
124 cpu = smp_processor_id();
125 hcpu = (void *)(long)cpu;
126 local_irq_disable(); 141 local_irq_disable();
127 if (need_resched()) { 142 if (need_resched()) {
128 local_irq_enable(); 143 local_irq_enable();
129 return; 144 return;
130 } 145 }
131 146 if (s390_idle_enter() == NOTIFY_BAD) {
132 rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
133 &nr_calls);
134 if (rc == NOTIFY_BAD) {
135 nr_calls--;
136 __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
137 hcpu, nr_calls, NULL);
138 local_irq_enable(); 147 local_irq_enable();
139 return; 148 return;
140 } 149 }
141
142 /* enable monitor call class 0 */
143 __ctl_set_bit(8, 15);
144
145#ifdef CONFIG_HOTPLUG_CPU 150#ifdef CONFIG_HOTPLUG_CPU
146 if (cpu_is_offline(cpu)) { 151 if (cpu_is_offline(smp_processor_id())) {
147 preempt_enable_no_resched(); 152 preempt_enable_no_resched();
148 cpu_die(); 153 cpu_die();
149 } 154 }
150#endif 155#endif
151
152 local_mcck_disable(); 156 local_mcck_disable();
153 if (test_thread_flag(TIF_MCCK_PENDING)) { 157 if (test_thread_flag(TIF_MCCK_PENDING)) {
154 local_mcck_enable(); 158 local_mcck_enable();
155 /* disable monitor call class 0 */ 159 s390_idle_leave();
156 __ctl_clear_bit(8, 15);
157 atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
158 hcpu);
159 local_irq_enable(); 160 local_irq_enable();
160 s390_handle_mcck(); 161 s390_handle_mcck();
161 return; 162 return;
162 } 163 }
163#ifdef CONFIG_SMP
164 idle = &__get_cpu_var(s390_idle);
165 spin_lock(&idle->lock);
166 idle->idle_count++;
167 idle->in_idle = 1;
168 idle->idle_enter = get_clock();
169 spin_unlock(&idle->lock);
170#endif
171 trace_hardirqs_on(); 164 trace_hardirqs_on();
172 /* Wait for external, I/O or machine check interrupt. */ 165 /* Wait for external, I/O or machine check interrupt. */
173 __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | 166 __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
@@ -177,9 +170,10 @@ static void default_idle(void)
177void cpu_idle(void) 170void cpu_idle(void)
178{ 171{
179 for (;;) { 172 for (;;) {
173 tick_nohz_stop_sched_tick();
180 while (!need_resched()) 174 while (!need_resched())
181 default_idle(); 175 default_idle();
182 176 tick_nohz_restart_sched_tick();
183 preempt_enable_no_resched(); 177 preempt_enable_no_resched();
184 schedule(); 178 schedule();
185 preempt_disable(); 179 preempt_disable();
@@ -201,6 +195,7 @@ void show_regs(struct pt_regs *regs)
201 /* Show stack backtrace if pt_regs is from kernel mode */ 195 /* Show stack backtrace if pt_regs is from kernel mode */
202 if (!(regs->psw.mask & PSW_MASK_PSTATE)) 196 if (!(regs->psw.mask & PSW_MASK_PSTATE))
203 show_trace(NULL, (unsigned long *) regs->gprs[15]); 197 show_trace(NULL, (unsigned long *) regs->gprs[15]);
198 show_last_breaking_event(regs);
204} 199}
205 200
206extern void kernel_thread_starter(void); 201extern void kernel_thread_starter(void);
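With the monitor-call machinery gone, idle accounting is now explicit: s390_idle_enter() runs the idle notifier chain and stamps the per-cpu s390_idle_data under its spinlock, s390_idle_leave() accumulates the elapsed time, and the generic tick_nohz_* calls bracket the whole idle loop. A hypothetical consumer of that per-cpu data, mirroring what a sysfs show routine would do with the fields visible above:

/* Sketch: total idle time of a cpu, extended to "now" when the cpu
 * is idle at the moment of reading (fields from s390_idle_data). */
static u64 foo_idle_time(struct s390_idle_data *idle)
{
	u64 time;

	spin_lock(&idle->lock);
	time = idle->idle_time;
	if (idle->in_idle)
		time += get_clock() - idle->idle_enter;
	spin_unlock(&idle->lock);
	return time;
}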
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 6e036bae9875..58a064296987 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -41,6 +41,7 @@
41#include <asm/system.h> 41#include <asm/system.h>
42#include <asm/uaccess.h> 42#include <asm/uaccess.h>
43#include <asm/unistd.h> 43#include <asm/unistd.h>
44#include "entry.h"
44 45
45#ifdef CONFIG_COMPAT 46#ifdef CONFIG_COMPAT
46#include "compat_ptrace.h" 47#include "compat_ptrace.h"
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index acf93dba7727..e019b419efc6 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -13,11 +13,12 @@
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/kernel_stat.h> 14#include <linux/kernel_stat.h>
15#include <linux/interrupt.h> 15#include <linux/interrupt.h>
16 16#include <asm/cpu.h>
17#include <asm/lowcore.h> 17#include <asm/lowcore.h>
18#include <asm/s390_ext.h> 18#include <asm/s390_ext.h>
19#include <asm/irq_regs.h> 19#include <asm/irq_regs.h>
20#include <asm/irq.h> 20#include <asm/irq.h>
21#include "entry.h"
21 22
22/* 23/*
23 * ext_int_hash[index] is the start of the list for all external interrupts 24 * ext_int_hash[index] is the start of the list for all external interrupts
@@ -119,13 +120,10 @@ void do_extint(struct pt_regs *regs, unsigned short code)
119 120
120 old_regs = set_irq_regs(regs); 121 old_regs = set_irq_regs(regs);
121 irq_enter(); 122 irq_enter();
122 asm volatile ("mc 0,0"); 123 s390_idle_check();
123 if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer) 124 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
124 /** 125 /* Serve timer interrupts first. */
125 * Make sure that the i/o interrupt did not "overtake" 126 clock_comparator_work();
126 * the last HZ timer interrupt.
127 */
128 account_ticks(S390_lowcore.int_clock);
129 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; 127 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
130 index = ext_hash(code); 128 index = ext_hash(code);
131 for (p = ext_int_hash[index]; p; p = p->next) { 129 for (p = ext_int_hash[index]; p; p = p->next) {
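do_extint() no longer re-arms a per-cpu HZ tick; it calls s390_idle_check() to close the idle accounting window and, when the interrupt timestamp has passed the programmed comparator value, hands control to clock_comparator_work() so the clockevent handler runs before anything else. Handlers reached through the ext_int_hash walk are registered elsewhere in this file; a hypothetical registration against this path (treat the exact register_external_interrupt() signature as an assumption of this sketch):

static void foo_ext_handler(__u16 code)
{
	/* invoked from do_extint() with interrupts disabled */
}

static int __init foo_init(void)
{
	return register_external_interrupt(0x1234, foo_ext_handler);
}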
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 7234c737f825..48238a114ce9 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -27,13 +27,6 @@ EXPORT_SYMBOL(_zb_findmap);
27EXPORT_SYMBOL(_sb_findmap); 27EXPORT_SYMBOL(_sb_findmap);
28 28
29/* 29/*
30 * semaphore ops
31 */
32EXPORT_SYMBOL(__up);
33EXPORT_SYMBOL(__down);
34EXPORT_SYMBOL(__down_interruptible);
35
36/*
37 * binfmt_elf loader 30 * binfmt_elf loader
38 */ 31 */
39extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs); 32extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs);
diff --git a/arch/s390/kernel/semaphore.c b/arch/s390/kernel/semaphore.c
deleted file mode 100644
index 191303f6c1d8..000000000000
--- a/arch/s390/kernel/semaphore.c
+++ /dev/null
@@ -1,108 +0,0 @@
1/*
2 * linux/arch/s390/kernel/semaphore.c
3 *
4 * S390 version
5 * Copyright (C) 1998-2000 IBM Corporation
6 * Author(s): Martin Schwidefsky
7 *
8 * Derived from "linux/arch/i386/kernel/semaphore.c
9 * Copyright (C) 1999, Linus Torvalds
10 *
11 */
12#include <linux/sched.h>
13#include <linux/errno.h>
14#include <linux/init.h>
15
16#include <asm/semaphore.h>
17
18/*
19 * Atomically update sem->count. Equivalent to:
20 * old_val = sem->count.counter;
21 * new_val = ((old_val >= 0) ? old_val : 0) + incr;
22 * sem->count.counter = new_val;
23 * return old_val;
24 */
25static inline int __sem_update_count(struct semaphore *sem, int incr)
26{
27 int old_val, new_val;
28
29 asm volatile(
30 " l %0,0(%3)\n"
31 "0: ltr %1,%0\n"
32 " jhe 1f\n"
33 " lhi %1,0\n"
34 "1: ar %1,%4\n"
35 " cs %0,%1,0(%3)\n"
36 " jl 0b\n"
37 : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count)
38 : "a" (&sem->count), "d" (incr), "m" (sem->count)
39 : "cc");
40 return old_val;
41}
42
43/*
44 * The inline function up() incremented count but the result
45 * was <= 0. This indicates that some process is waiting on
46 * the semaphore. The semaphore is free and we'll wake the
47 * first sleeping process, so we set count to 1 unless some
48 * other cpu has called up in the meantime in which case
49 * we just increment count by 1.
50 */
51void __up(struct semaphore *sem)
52{
53 __sem_update_count(sem, 1);
54 wake_up(&sem->wait);
55}
56
57/*
58 * The inline function down() decremented count and the result
59 * was < 0. The wait loop will atomically test and update the
60 * semaphore counter following the rules:
61 * count > 0: decrement count, wake up queue and exit.
62 * count <= 0: set count to -1, go to sleep.
63 */
64void __sched __down(struct semaphore * sem)
65{
66 struct task_struct *tsk = current;
67 DECLARE_WAITQUEUE(wait, tsk);
68
69 __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
70 add_wait_queue_exclusive(&sem->wait, &wait);
71 while (__sem_update_count(sem, -1) <= 0) {
72 schedule();
73 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
74 }
75 remove_wait_queue(&sem->wait, &wait);
76 __set_task_state(tsk, TASK_RUNNING);
77 wake_up(&sem->wait);
78}
79
80/*
81 * Same as __down() with an additional test for signals.
82 * If a signal is pending the count is updated as follows:
83 * count > 0: wake up queue and exit.
84 * count <= 0: set count to 0, wake up queue and exit.
85 */
86int __sched __down_interruptible(struct semaphore * sem)
87{
88 int retval = 0;
89 struct task_struct *tsk = current;
90 DECLARE_WAITQUEUE(wait, tsk);
91
92 __set_task_state(tsk, TASK_INTERRUPTIBLE);
93 add_wait_queue_exclusive(&sem->wait, &wait);
94 while (__sem_update_count(sem, -1) <= 0) {
95 if (signal_pending(current)) {
96 __sem_update_count(sem, 0);
97 retval = -EINTR;
98 break;
99 }
100 schedule();
101 set_task_state(tsk, TASK_INTERRUPTIBLE);
102 }
103 remove_wait_queue(&sem->wait, &wait);
104 __set_task_state(tsk, TASK_RUNNING);
105 wake_up(&sem->wait);
106 return retval;
107}
108
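The file falls away because the kernel now has a single generic semaphore implementation, but the deleted __sem_update_count() is a nice specimen of the bounded update it implemented with the CS (compare-and-swap) instruction. The same algorithm in portable C, using a gcc builtin in place of CS (sketch only):

/* Clamp the old count at zero, add incr, retry on a racing update;
 * returns the pre-update value, exactly like the removed helper. */
static int sem_update_count(int *count, int incr)
{
	int old_val, new_val;

	do {
		old_val = *count;
		new_val = (old_val >= 0 ? old_val : 0) + incr;
	} while (!__sync_bool_compare_and_swap(count, old_val, new_val));
	return old_val;
}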
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 290e504061a3..7141147e6b63 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -39,6 +39,7 @@
39#include <linux/pfn.h> 39#include <linux/pfn.h>
40#include <linux/ctype.h> 40#include <linux/ctype.h>
41#include <linux/reboot.h> 41#include <linux/reboot.h>
42#include <linux/topology.h>
42 43
43#include <asm/ipl.h> 44#include <asm/ipl.h>
44#include <asm/uaccess.h> 45#include <asm/uaccess.h>
@@ -427,7 +428,7 @@ setup_lowcore(void)
427 lc->io_new_psw.mask = psw_kernel_bits; 428 lc->io_new_psw.mask = psw_kernel_bits;
428 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; 429 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
429 lc->ipl_device = S390_lowcore.ipl_device; 430 lc->ipl_device = S390_lowcore.ipl_device;
430 lc->jiffy_timer = -1LL; 431 lc->clock_comparator = -1ULL;
431 lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; 432 lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
432 lc->async_stack = (unsigned long) 433 lc->async_stack = (unsigned long)
433 __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; 434 __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
@@ -687,7 +688,7 @@ static __init unsigned int stfl(void)
687 return S390_lowcore.stfl_fac_list; 688 return S390_lowcore.stfl_fac_list;
688} 689}
689 690
690static __init int stfle(unsigned long long *list, int doublewords) 691static int __init __stfle(unsigned long long *list, int doublewords)
691{ 692{
692 typedef struct { unsigned long long _[doublewords]; } addrtype; 693 typedef struct { unsigned long long _[doublewords]; } addrtype;
693 register unsigned long __nr asm("0") = doublewords - 1; 694 register unsigned long __nr asm("0") = doublewords - 1;
@@ -697,6 +698,13 @@ static __init int stfle(unsigned long long *list, int doublewords)
697 return __nr + 1; 698 return __nr + 1;
698} 699}
699 700
701int __init stfle(unsigned long long *list, int doublewords)
702{
703 if (!(stfl() & (1UL << 24)))
704 return -EOPNOTSUPP;
705 return __stfle(list, doublewords);
706}
707
700/* 708/*
701 * Setup hardware capabilities. 709 * Setup hardware capabilities.
702 */ 710 */
@@ -741,7 +749,7 @@ static void __init setup_hwcaps(void)
741 * HWCAP_S390_DFP bit 6. 749 * HWCAP_S390_DFP bit 6.
742 */ 750 */
743 if ((elf_hwcap & (1UL << 2)) && 751 if ((elf_hwcap & (1UL << 2)) &&
744 stfle(&facility_list_extended, 1) > 0) { 752 __stfle(&facility_list_extended, 1) > 0) {
745 if (facility_list_extended & (1ULL << (64 - 43))) 753 if (facility_list_extended & (1ULL << (64 - 43)))
746 elf_hwcap |= 1UL << 6; 754 elf_hwcap |= 1UL << 6;
747 } 755 }
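stfle() is now a checked facade: it returns -EOPNOTSUPP unless STFL reports bit 24, the store-facility-list-extended facility, and only then executes the instruction via __stfle(). Callers that have already established the facility, like the DFP probe above, keep using __stfle() directly. A hypothetical user of the checked wrapper:

/* Probe for decimal floating point: bit 43 of the extended facility
 * list, hence the 1ULL << (64 - 43) mask used in setup_hwcaps(). */
static void __init foo_probe_dfp(void)
{
	unsigned long long facilities;

	if (stfle(&facilities, 1) > 0 &&
	    (facilities & (1ULL << (64 - 43))))
		printk(KERN_INFO "DFP facility installed\n");
}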
@@ -823,6 +831,7 @@ setup_arch(char **cmdline_p)
823 831
824 cpu_init(); 832 cpu_init();
825 __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; 833 __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
834 s390_init_cpu_topology();
826 835
827 /* 836 /*
828 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). 837 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 4449bf32cbf1..b97682040215 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -27,6 +27,7 @@
27#include <asm/ucontext.h> 27#include <asm/ucontext.h>
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29#include <asm/lowcore.h> 29#include <asm/lowcore.h>
30#include "entry.h"
30 31
31#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 32#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
32 33
@@ -235,6 +236,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
235 /* Default to using normal stack */ 236 /* Default to using normal stack */
236 sp = regs->gprs[15]; 237 sp = regs->gprs[15];
237 238
239 /* Overflow on alternate signal stack gives SIGSEGV. */
240 if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
241 return (void __user *) -1UL;
242
238 /* This is the X/Open sanctioned signal stack switching. */ 243 /* This is the X/Open sanctioned signal stack switching. */
239 if (ka->sa.sa_flags & SA_ONSTACK) { 244 if (ka->sa.sa_flags & SA_ONSTACK) {
240 if (! sas_ss_flags(sp)) 245 if (! sas_ss_flags(sp))
@@ -270,6 +275,9 @@ static int setup_frame(int sig, struct k_sigaction *ka,
270 if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe))) 275 if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe)))
271 goto give_sigsegv; 276 goto give_sigsegv;
272 277
278 if (frame == (void __user *) -1UL)
279 goto give_sigsegv;
280
273 if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE)) 281 if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE))
274 goto give_sigsegv; 282 goto give_sigsegv;
275 283
@@ -327,6 +335,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
327 if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe))) 335 if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe)))
328 goto give_sigsegv; 336 goto give_sigsegv;
329 337
338 if (frame == (void __user *) -1UL)
339 goto give_sigsegv;
340
330 if (copy_siginfo_to_user(&frame->info, info)) 341 if (copy_siginfo_to_user(&frame->info, info))
331 goto give_sigsegv; 342 goto give_sigsegv;
332 343
@@ -474,11 +485,6 @@ void do_signal(struct pt_regs *regs)
474 int ret; 485 int ret;
475#ifdef CONFIG_COMPAT 486#ifdef CONFIG_COMPAT
476 if (test_thread_flag(TIF_31BIT)) { 487 if (test_thread_flag(TIF_31BIT)) {
477 extern int handle_signal32(unsigned long sig,
478 struct k_sigaction *ka,
479 siginfo_t *info,
480 sigset_t *oldset,
481 struct pt_regs *regs);
482 ret = handle_signal32(signr, &ka, &info, oldset, regs); 488 ret = handle_signal32(signr, &ka, &info, oldset, regs);
483 } 489 }
484 else 490 else
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8f894d380a62..0dfa988c1b26 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -44,6 +44,7 @@
44#include <asm/lowcore.h> 44#include <asm/lowcore.h>
45#include <asm/sclp.h> 45#include <asm/sclp.h>
46#include <asm/cpu.h> 46#include <asm/cpu.h>
47#include "entry.h"
47 48
48/* 49/*
49 * An array with a pointer the lowcore of every CPU. 50 * An array with a pointer the lowcore of every CPU.
@@ -67,13 +68,12 @@ enum s390_cpu_state {
67 CPU_STATE_CONFIGURED, 68 CPU_STATE_CONFIGURED,
68}; 69};
69 70
70#ifdef CONFIG_HOTPLUG_CPU 71DEFINE_MUTEX(smp_cpu_state_mutex);
71static DEFINE_MUTEX(smp_cpu_state_mutex); 72int smp_cpu_polarization[NR_CPUS];
72#endif
73static int smp_cpu_state[NR_CPUS]; 73static int smp_cpu_state[NR_CPUS];
74static int cpu_management;
74 75
75static DEFINE_PER_CPU(struct cpu, cpu_devices); 76static DEFINE_PER_CPU(struct cpu, cpu_devices);
76DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
77 77
78static void smp_ext_bitcall(int, ec_bit_sig); 78static void smp_ext_bitcall(int, ec_bit_sig);
79 79
@@ -298,7 +298,7 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
298/* 298/*
299 * this function sends a 'purge tlb' signal to another CPU. 299 * this function sends a 'purge tlb' signal to another CPU.
300 */ 300 */
301void smp_ptlb_callback(void *info) 301static void smp_ptlb_callback(void *info)
302{ 302{
303 __tlb_flush_local(); 303 __tlb_flush_local();
304} 304}
@@ -456,6 +456,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
456 if (cpu_known(cpu_id)) 456 if (cpu_known(cpu_id))
457 continue; 457 continue;
458 __cpu_logical_map[logical_cpu] = cpu_id; 458 __cpu_logical_map[logical_cpu] = cpu_id;
459 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
459 if (!cpu_stopped(logical_cpu)) 460 if (!cpu_stopped(logical_cpu))
460 continue; 461 continue;
461 cpu_set(logical_cpu, cpu_present_map); 462 cpu_set(logical_cpu, cpu_present_map);
@@ -489,6 +490,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
489 if (cpu_known(cpu_id)) 490 if (cpu_known(cpu_id))
490 continue; 491 continue;
491 __cpu_logical_map[logical_cpu] = cpu_id; 492 __cpu_logical_map[logical_cpu] = cpu_id;
493 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
492 cpu_set(logical_cpu, cpu_present_map); 494 cpu_set(logical_cpu, cpu_present_map);
493 if (cpu >= info->configured) 495 if (cpu >= info->configured)
494 smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; 496 smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
@@ -846,6 +848,7 @@ void __init smp_prepare_boot_cpu(void)
846 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 848 S390_lowcore.percpu_offset = __per_cpu_offset[0];
847 current_set[0] = current; 849 current_set[0] = current;
848 smp_cpu_state[0] = CPU_STATE_CONFIGURED; 850 smp_cpu_state[0] = CPU_STATE_CONFIGURED;
851 smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
849 spin_lock_init(&(&__get_cpu_var(s390_idle))->lock); 852 spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
850} 853}
851 854
@@ -897,15 +900,19 @@ static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
897 case 0: 900 case 0:
898 if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { 901 if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
899 rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); 902 rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
900 if (!rc) 903 if (!rc) {
901 smp_cpu_state[cpu] = CPU_STATE_STANDBY; 904 smp_cpu_state[cpu] = CPU_STATE_STANDBY;
905 smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
906 }
902 } 907 }
903 break; 908 break;
904 case 1: 909 case 1:
905 if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { 910 if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
906 rc = sclp_cpu_configure(__cpu_logical_map[cpu]); 911 rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
907 if (!rc) 912 if (!rc) {
908 smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; 913 smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
914 smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
915 }
909 } 916 }
910 break; 917 break;
911 default: 918 default:
@@ -919,6 +926,34 @@ out:
919static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); 926static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
920#endif /* CONFIG_HOTPLUG_CPU */ 927#endif /* CONFIG_HOTPLUG_CPU */
921 928
929static ssize_t cpu_polarization_show(struct sys_device *dev, char *buf)
930{
931 int cpu = dev->id;
932 ssize_t count;
933
934 mutex_lock(&smp_cpu_state_mutex);
935 switch (smp_cpu_polarization[cpu]) {
936 case POLARIZATION_HRZ:
937 count = sprintf(buf, "horizontal\n");
938 break;
939 case POLARIZATION_VL:
940 count = sprintf(buf, "vertical:low\n");
941 break;
942 case POLARIZATION_VM:
943 count = sprintf(buf, "vertical:medium\n");
944 break;
945 case POLARIZATION_VH:
946 count = sprintf(buf, "vertical:high\n");
947 break;
948 default:
949 count = sprintf(buf, "unknown\n");
950 break;
951 }
952 mutex_unlock(&smp_cpu_state_mutex);
953 return count;
954}
955static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
956
922static ssize_t show_cpu_address(struct sys_device *dev, char *buf) 957static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
923{ 958{
924 return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); 959 return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
@@ -931,6 +966,7 @@ static struct attribute *cpu_common_attrs[] = {
931 &attr_configure.attr, 966 &attr_configure.attr,
932#endif 967#endif
933 &attr_address.attr, 968 &attr_address.attr,
969 &attr_polarization.attr,
934 NULL, 970 NULL,
935}; 971};
936 972
@@ -1075,11 +1111,48 @@ static ssize_t __ref rescan_store(struct sys_device *dev,
1075out: 1111out:
1076 put_online_cpus(); 1112 put_online_cpus();
1077 mutex_unlock(&smp_cpu_state_mutex); 1113 mutex_unlock(&smp_cpu_state_mutex);
1114 if (!cpus_empty(newcpus))
1115 topology_schedule_update();
1078 return rc ? rc : count; 1116 return rc ? rc : count;
1079} 1117}
1080static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store); 1118static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
1081#endif /* CONFIG_HOTPLUG_CPU */ 1119#endif /* CONFIG_HOTPLUG_CPU */
1082 1120
1121static ssize_t dispatching_show(struct sys_device *dev, char *buf)
1122{
1123 ssize_t count;
1124
1125 mutex_lock(&smp_cpu_state_mutex);
1126 count = sprintf(buf, "%d\n", cpu_management);
1127 mutex_unlock(&smp_cpu_state_mutex);
1128 return count;
1129}
1130
1131static ssize_t dispatching_store(struct sys_device *dev, const char *buf,
1132 size_t count)
1133{
1134 int val, rc;
1135 char delim;
1136
1137 if (sscanf(buf, "%d %c", &val, &delim) != 1)
1138 return -EINVAL;
1139 if (val != 0 && val != 1)
1140 return -EINVAL;
1141 rc = 0;
1142 mutex_lock(&smp_cpu_state_mutex);
1143 get_online_cpus();
1144 if (cpu_management == val)
1145 goto out;
1146 rc = topology_set_cpu_management(val);
1147 if (!rc)
1148 cpu_management = val;
1149out:
1150 put_online_cpus();
1151 mutex_unlock(&smp_cpu_state_mutex);
1152 return rc ? rc : count;
1153}
1154static SYSDEV_ATTR(dispatching, 0644, dispatching_show, dispatching_store);
1155
1083static int __init topology_init(void) 1156static int __init topology_init(void)
1084{ 1157{
1085 int cpu; 1158 int cpu;
@@ -1093,6 +1166,10 @@ static int __init topology_init(void)
1093 if (rc) 1166 if (rc)
1094 return rc; 1167 return rc;
1095#endif 1168#endif
1169 rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
1170 &attr_dispatching.attr);
1171 if (rc)
1172 return rc;
1096 for_each_present_cpu(cpu) { 1173 for_each_present_cpu(cpu) {
1097 rc = smp_add_present_cpu(cpu); 1174 rc = smp_add_present_cpu(cpu);
1098 if (rc) 1175 if (rc)
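Two sysfs knobs come with the topology code: a read-only per-cpu "polarization" attribute that renders the smp_cpu_polarization[] state, and a writable class-wide "dispatching" attribute (visible as /sys/devices/system/cpu/dispatching) that switches between horizontal (0) and vertical (1) cpu management through topology_set_cpu_management(). The "%d %c" parse in dispatching_store() is a compact strict-input idiom worth noting; in isolation:

/* Accepts "0", "1" and "1\n"; rejects "1 2", "1x" and the like,
 * because any second token makes sscanf() return 2 instead of 1. */
static int parse_zero_or_one(const char *buf, int *val)
{
	char delim;

	if (sscanf(buf, "%d %c", val, &delim) != 1)
		return -EINVAL;
	if (*val != 0 && *val != 1)
		return -EINVAL;
	return 0;
}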
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index fefee99f28aa..988d0d64c2c8 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -29,8 +29,8 @@
29#include <linux/personality.h> 29#include <linux/personality.h>
30#include <linux/unistd.h> 30#include <linux/unistd.h>
31#include <linux/ipc.h> 31#include <linux/ipc.h>
32
33#include <asm/uaccess.h> 32#include <asm/uaccess.h>
33#include "entry.h"
34 34
35/* 35/*
36 * sys_pipe() is the normal C calling standard for creating 36 * sys_pipe() is the normal C calling standard for creating
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index cb232c155360..7aec676fefd5 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -30,7 +30,7 @@
30#include <linux/timex.h> 30#include <linux/timex.h>
31#include <linux/notifier.h> 31#include <linux/notifier.h>
32#include <linux/clocksource.h> 32#include <linux/clocksource.h>
33 33#include <linux/clockchips.h>
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35#include <asm/delay.h> 35#include <asm/delay.h>
36#include <asm/s390_ext.h> 36#include <asm/s390_ext.h>
@@ -39,6 +39,7 @@
39#include <asm/irq_regs.h> 39#include <asm/irq_regs.h>
40#include <asm/timer.h> 40#include <asm/timer.h>
41#include <asm/etr.h> 41#include <asm/etr.h>
42#include <asm/cio.h>
42 43
43/* change this if you have some constant time drift */ 44/* change this if you have some constant time drift */
44#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) 45#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
@@ -57,16 +58,16 @@
57 58
58static ext_int_info_t ext_int_info_cc; 59static ext_int_info_t ext_int_info_cc;
59static ext_int_info_t ext_int_etr_cc; 60static ext_int_info_t ext_int_etr_cc;
60static u64 init_timer_cc;
61static u64 jiffies_timer_cc; 61static u64 jiffies_timer_cc;
62static u64 xtime_cc; 62
63static DEFINE_PER_CPU(struct clock_event_device, comparators);
63 64
64/* 65/*
65 * Scheduler clock - returns current time in nanosec units. 66 * Scheduler clock - returns current time in nanosec units.
66 */ 67 */
67unsigned long long sched_clock(void) 68unsigned long long sched_clock(void)
68{ 69{
69 return ((get_clock() - jiffies_timer_cc) * 125) >> 9; 70 return ((get_clock_xt() - jiffies_timer_cc) * 125) >> 9;
70} 71}
71 72
72/* 73/*
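The constant in sched_clock() is a unit conversion: the TOD clock advances 4096 units per microsecond (bit 51 ticks once per microsecond), i.e. 4.096 units per nanosecond, so nanoseconds = tod * 1000 / 4096 = tod * 125 / 512, which is exactly the ((...) * 125) >> 9 above. Written out as a helper (sketch; subtracting jiffies_timer_cc keeps the operand small enough that the 64-bit multiply cannot overflow for over a year of uptime):

static inline unsigned long long tod_to_ns(unsigned long long tod)
{
	return (tod * 125) >> 9;	/* * 1000 / 4096, exact */
}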
@@ -95,162 +96,40 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime)
95#define s390_do_profile() do { ; } while(0) 96#define s390_do_profile() do { ; } while(0)
96#endif /* CONFIG_PROFILING */ 97#endif /* CONFIG_PROFILING */
97 98
98/* 99void clock_comparator_work(void)
99 * Advance the per cpu tick counter up to the time given with the
100 * "time" argument. The per cpu update consists of accounting
101 * the virtual cpu time, calling update_process_times and calling
102 * the profiling hook. If xtime is before time it is advanced as well.
103 */
104void account_ticks(u64 time)
105{ 100{
106 __u32 ticks; 101 struct clock_event_device *cd;
107 __u64 tmp;
108
109 /* Calculate how many ticks have passed. */
110 if (time < S390_lowcore.jiffy_timer)
111 return;
112 tmp = time - S390_lowcore.jiffy_timer;
113 if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */
114 ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
115 S390_lowcore.jiffy_timer +=
116 CLK_TICKS_PER_JIFFY * (__u64) ticks;
117 } else if (tmp >= CLK_TICKS_PER_JIFFY) {
118 ticks = 2;
119 S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY;
120 } else {
121 ticks = 1;
122 S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
123 }
124
125#ifdef CONFIG_SMP
126 /*
127 * Do not rely on the boot cpu to do the calls to do_timer.
128 * Spread it over all cpus instead.
129 */
130 write_seqlock(&xtime_lock);
131 if (S390_lowcore.jiffy_timer > xtime_cc) {
132 __u32 xticks;
133 tmp = S390_lowcore.jiffy_timer - xtime_cc;
134 if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
135 xticks = __div(tmp, CLK_TICKS_PER_JIFFY);
136 xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
137 } else {
138 xticks = 1;
139 xtime_cc += CLK_TICKS_PER_JIFFY;
140 }
141 do_timer(xticks);
142 }
143 write_sequnlock(&xtime_lock);
144#else
145 do_timer(ticks);
146#endif
147
148 while (ticks--)
149 update_process_times(user_mode(get_irq_regs()));
150 102
103 S390_lowcore.clock_comparator = -1ULL;
104 set_clock_comparator(S390_lowcore.clock_comparator);
105 cd = &__get_cpu_var(comparators);
106 cd->event_handler(cd);
151 s390_do_profile(); 107 s390_do_profile();
152} 108}
153 109
154#ifdef CONFIG_NO_IDLE_HZ
155
156#ifdef CONFIG_NO_IDLE_HZ_INIT
157int sysctl_hz_timer = 0;
158#else
159int sysctl_hz_timer = 1;
160#endif
161
162/*
163 * Stop the HZ tick on the current CPU.
164 * Only cpu_idle may call this function.
165 */
166static void stop_hz_timer(void)
167{
168 unsigned long flags;
169 unsigned long seq, next;
170 __u64 timer, todval;
171 int cpu = smp_processor_id();
172
173 if (sysctl_hz_timer != 0)
174 return;
175
176 cpu_set(cpu, nohz_cpu_mask);
177
178 /*
179 * Leave the clock comparator set up for the next timer
180 * tick if either rcu or a softirq is pending.
181 */
182 if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
183 cpu_clear(cpu, nohz_cpu_mask);
184 return;
185 }
186
187 /*
188 * This cpu is going really idle. Set up the clock comparator
189 * for the next event.
190 */
191 next = next_timer_interrupt();
192 do {
193 seq = read_seqbegin_irqsave(&xtime_lock, flags);
194 timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64;
195 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
196 todval = -1ULL;
197 /* Be careful about overflows. */
198 if (timer < (-1ULL / CLK_TICKS_PER_JIFFY)) {
199 timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
200 if (timer >= jiffies_timer_cc)
201 todval = timer;
202 }
203 set_clock_comparator(todval);
204}
205
206/* 110/*
207 * Start the HZ tick on the current CPU. 111 * Fixup the clock comparator.
208 * Only cpu_idle may call this function.
209 */ 112 */
210static void start_hz_timer(void) 113static void fixup_clock_comparator(unsigned long long delta)
211{ 114{
212 if (!cpu_isset(smp_processor_id(), nohz_cpu_mask)) 115 /* If nobody is waiting there's nothing to fix. */
116 if (S390_lowcore.clock_comparator == -1ULL)
213 return; 117 return;
214 account_ticks(get_clock()); 118 S390_lowcore.clock_comparator += delta;
215 set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION); 119 set_clock_comparator(S390_lowcore.clock_comparator);
216 cpu_clear(smp_processor_id(), nohz_cpu_mask);
217}
218
219static int nohz_idle_notify(struct notifier_block *self,
220 unsigned long action, void *hcpu)
221{
222 switch (action) {
223 case S390_CPU_IDLE:
224 stop_hz_timer();
225 break;
226 case S390_CPU_NOT_IDLE:
227 start_hz_timer();
228 break;
229 }
230 return NOTIFY_OK;
231} 120}
232 121
233static struct notifier_block nohz_idle_nb = { 122static int s390_next_event(unsigned long delta,
234 .notifier_call = nohz_idle_notify, 123 struct clock_event_device *evt)
235};
236
237static void __init nohz_init(void)
238{ 124{
239 if (register_idle_notifier(&nohz_idle_nb)) 125 S390_lowcore.clock_comparator = get_clock() + delta;
240 panic("Couldn't register idle notifier"); 126 set_clock_comparator(S390_lowcore.clock_comparator);
127 return 0;
241} 128}
242 129
243#endif 130static void s390_set_mode(enum clock_event_mode mode,
244 131 struct clock_event_device *evt)
245/*
246 * Set up per cpu jiffy timer and set the clock comparator.
247 */
248static void setup_jiffy_timer(void)
249{ 132{
250 /* Set up clock comparator to next jiffy. */
251 S390_lowcore.jiffy_timer =
252 jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY;
253 set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
254} 133}
255 134
256/* 135/*
@@ -259,7 +138,26 @@ static void setup_jiffy_timer(void)
259 */ 138 */
260void init_cpu_timer(void) 139void init_cpu_timer(void)
261{ 140{
262 setup_jiffy_timer(); 141 struct clock_event_device *cd;
142 int cpu;
143
144 S390_lowcore.clock_comparator = -1ULL;
145 set_clock_comparator(S390_lowcore.clock_comparator);
146
147 cpu = smp_processor_id();
148 cd = &per_cpu(comparators, cpu);
149 cd->name = "comparator";
150 cd->features = CLOCK_EVT_FEAT_ONESHOT;
151 cd->mult = 16777;
152 cd->shift = 12;
153 cd->min_delta_ns = 1;
154 cd->max_delta_ns = LONG_MAX;
155 cd->rating = 400;
156 cd->cpumask = cpumask_of_cpu(cpu);
157 cd->set_next_event = s390_next_event;
158 cd->set_mode = s390_set_mode;
159
160 clockevents_register_device(cd);
263 161
264 /* Enable clock comparator timer interrupt. */ 162 /* Enable clock comparator timer interrupt. */
265 __ctl_set_bit(0,11); 163 __ctl_set_bit(0,11);
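The mult/shift pair registered here encodes the opposite conversion, nanoseconds to TOD units, in the fixed-point form the clockevents core uses: ticks = (ns * mult) >> shift. With mult = 16777 and shift = 12 that is ns * 16777 / 4096 ~= ns * 4.096, matching the TOD rate derived for sched_clock() above. As a standalone expression (sketch):

static inline unsigned long long ns_to_tod(unsigned long long ns)
{
	return (ns * 16777) >> 12;	/* ~ ns * 4.096 TOD units */
}

s390_next_event() then adds the converted delta to the current TOD value and programs the comparator, while clock_comparator_work() disarms it (sets it back to -1ULL) before calling the event handler.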
@@ -270,8 +168,6 @@ void init_cpu_timer(void)
270 168
271static void clock_comparator_interrupt(__u16 code) 169static void clock_comparator_interrupt(__u16 code)
272{ 170{
273 /* set clock comparator for next tick */
274 set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
275} 171}
276 172
277static void etr_reset(void); 173static void etr_reset(void);
@@ -316,8 +212,9 @@ static struct clocksource clocksource_tod = {
316 */ 212 */
317void __init time_init(void) 213void __init time_init(void)
318{ 214{
215 u64 init_timer_cc;
216
319 init_timer_cc = reset_tod_clock(); 217 init_timer_cc = reset_tod_clock();
320 xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
321 jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY; 218 jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;
322 219
323 /* set xtime */ 220 /* set xtime */
@@ -342,10 +239,6 @@ void __init time_init(void)
342 /* Enable TOD clock interrupts on the boot cpu. */ 239 /* Enable TOD clock interrupts on the boot cpu. */
343 init_cpu_timer(); 240 init_cpu_timer();
344 241
345#ifdef CONFIG_NO_IDLE_HZ
346 nohz_init();
347#endif
348
349#ifdef CONFIG_VIRT_TIMER 242#ifdef CONFIG_VIRT_TIMER
350 vtime_init(); 243 vtime_init();
351#endif 244#endif
@@ -699,53 +592,49 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
699} 592}
700 593
701/* 594/*
702 * The time is "clock". xtime is what we think the time is. 595 * The time is "clock". old is what we think the time is.
703 * Adjust the value by a multiple of jiffies and add the delta to ntp. 596 * Adjust the value by a multiple of jiffies and add the delta to ntp.
704 * "delay" is an approximation how long the synchronization took. If 597 * "delay" is an approximation how long the synchronization took. If
705 * the time correction is positive, then "delay" is subtracted from 598 * the time correction is positive, then "delay" is subtracted from
706 * the time difference and only the remaining part is passed to ntp. 599 * the time difference and only the remaining part is passed to ntp.
707 */ 600 */
708static void etr_adjust_time(unsigned long long clock, unsigned long long delay) 601static unsigned long long etr_adjust_time(unsigned long long old,
602 unsigned long long clock,
603 unsigned long long delay)
709{ 604{
710 unsigned long long delta, ticks; 605 unsigned long long delta, ticks;
711 struct timex adjust; 606 struct timex adjust;
712 607
713 /* 608 if (clock > old) {
714 * We don't have to take the xtime lock because the cpu
715 * executing etr_adjust_time is running disabled in
716 * tasklet context and all other cpus are looping in
717 * etr_sync_cpu_start.
718 */
719 if (clock > xtime_cc) {
720 /* It is later than we thought. */ 609 /* It is later than we thought. */
721 delta = ticks = clock - xtime_cc; 610 delta = ticks = clock - old;
722 delta = ticks = (delta < delay) ? 0 : delta - delay; 611 delta = ticks = (delta < delay) ? 0 : delta - delay;
723 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); 612 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
724 init_timer_cc = init_timer_cc + delta;
725 jiffies_timer_cc = jiffies_timer_cc + delta;
726 xtime_cc = xtime_cc + delta;
727 adjust.offset = ticks * (1000000 / HZ); 613 adjust.offset = ticks * (1000000 / HZ);
728 } else { 614 } else {
729 /* It is earlier than we thought. */ 615 /* It is earlier than we thought. */
730 delta = ticks = xtime_cc - clock; 616 delta = ticks = old - clock;
731 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); 617 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
732 init_timer_cc = init_timer_cc - delta; 618 delta = -delta;
733 jiffies_timer_cc = jiffies_timer_cc - delta;
734 xtime_cc = xtime_cc - delta;
735 adjust.offset = -ticks * (1000000 / HZ); 619 adjust.offset = -ticks * (1000000 / HZ);
736 } 620 }
621 jiffies_timer_cc += delta;
737 if (adjust.offset != 0) { 622 if (adjust.offset != 0) {
738 printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n", 623 printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
739 adjust.offset); 624 adjust.offset);
740 adjust.modes = ADJ_OFFSET_SINGLESHOT; 625 adjust.modes = ADJ_OFFSET_SINGLESHOT;
741 do_adjtimex(&adjust); 626 do_adjtimex(&adjust);
742 } 627 }
628 return delta;
743} 629}
744 630
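The arithmetic in etr_adjust_time() splits the raw TOD difference into whole jiffies, reported to ntp in microseconds, and a sub-jiffy remainder absorbed into jiffies_timer_cc: do_div() leaves the quotient in ticks and returns the remainder, so "delta -= do_div(ticks, CLK_TICKS_PER_JIFFY)" rounds delta down to a jiffy multiple. A sketch of the same split in plain C, with the CLK_TICKS_PER_JIFFY and HZ values assumed for illustration:

#include <stdio.h>

#define HZ 100
#define CLK_TICKS_PER_JIFFY (4096000000ULL / HZ)	/* assumed TOD units per jiffy */

int main(void)
{
	unsigned long long delta = 3 * CLK_TICKS_PER_JIFFY + 12345;
	unsigned long long ticks = delta / CLK_TICKS_PER_JIFFY;

	delta -= delta % CLK_TICKS_PER_JIFFY;	/* what delta -= do_div(...) achieves */
	printf("ntp offset: %llu us, cc shift: %llu ticks\n",
	       ticks * (1000000 / HZ), delta);
	return 0;
}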
631static struct {
632 int in_sync;
633 unsigned long long fixup_cc;
634} etr_sync;
635
745static void etr_sync_cpu_start(void *dummy) 636static void etr_sync_cpu_start(void *dummy)
746{ 637{
747 int *in_sync = dummy;
748
749 etr_enable_sync_clock(); 638 etr_enable_sync_clock();
750 /* 639 /*
751 * This looks like a busy wait loop but it isn't. etr_sync_cpus 640 * This looks like a busy wait loop but it isn't. etr_sync_cpus
@@ -753,7 +642,7 @@ static void etr_sync_cpu_start(void *dummy)
753 * __udelay will stop the cpu on an enabled wait psw until the 642 * __udelay will stop the cpu on an enabled wait psw until the
754 * TOD is running again. 643 * TOD is running again.
755 */ 644 */
756 while (*in_sync == 0) { 645 while (etr_sync.in_sync == 0) {
757 __udelay(1); 646 __udelay(1);
758 /* 647 /*
759 * A different cpu changes *in_sync. Therefore use 648 * A different cpu changes *in_sync. Therefore use
@@ -761,14 +650,14 @@ static void etr_sync_cpu_start(void *dummy)
761 */ 650 */
762 barrier(); 651 barrier();
763 } 652 }
764 if (*in_sync != 1) 653 if (etr_sync.in_sync != 1)
765 /* Didn't work. Clear per-cpu in sync bit again. */ 654 /* Didn't work. Clear per-cpu in sync bit again. */
766 etr_disable_sync_clock(NULL); 655 etr_disable_sync_clock(NULL);
767 /* 656 /*
768 * This round of TOD syncing is done. Set the clock comparator 657 * This round of TOD syncing is done. Set the clock comparator
769 * to the next tick and let the processor continue. 658 * to the next tick and let the processor continue.
770 */ 659 */
771 setup_jiffy_timer(); 660 fixup_clock_comparator(etr_sync.fixup_cc);
772} 661}
773 662
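The barrier() in the loop above is load-bearing: without a compiler barrier the flag read could be hoisted out of the loop and the wait would never terminate. A reduced userspace model of the same spin (flag name reused for illustration):

#define barrier() asm volatile("" ::: "memory")

static int in_sync;			/* written by the cpu driving the sync */

static void wait_for_leader(void)
{
	while (in_sync == 0)
		barrier();		/* force a fresh load of in_sync each pass */
}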
774static void etr_sync_cpu_end(void *dummy) 663static void etr_sync_cpu_end(void *dummy)
@@ -783,8 +672,8 @@ static void etr_sync_cpu_end(void *dummy)
783static int etr_sync_clock(struct etr_aib *aib, int port) 672static int etr_sync_clock(struct etr_aib *aib, int port)
784{ 673{
785 struct etr_aib *sync_port; 674 struct etr_aib *sync_port;
786 unsigned long long clock, delay; 675 unsigned long long clock, old_clock, delay, delta;
787 int in_sync, follows; 676 int follows;
788 int rc; 677 int rc;
789 678
790 /* Check if the current aib is adjacent to the sync port aib. */ 679 /* Check if the current aib is adjacent to the sync port aib. */
@@ -799,9 +688,9 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
799 * successfully synced the clock. smp_call_function will 688 * successfully synced the clock. smp_call_function will
800 * return after all other cpus are in etr_sync_cpu_start. 689 * return after all other cpus are in etr_sync_cpu_start.
801 */ 690 */
802 in_sync = 0; 691 memset(&etr_sync, 0, sizeof(etr_sync));
803 preempt_disable(); 692 preempt_disable();
804 smp_call_function(etr_sync_cpu_start,&in_sync,0,0); 693 smp_call_function(etr_sync_cpu_start, NULL, 0, 0);
805 local_irq_disable(); 694 local_irq_disable();
806 etr_enable_sync_clock(); 695 etr_enable_sync_clock();
807 696
@@ -809,6 +698,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
809 __ctl_set_bit(14, 21); 698 __ctl_set_bit(14, 21);
810 __ctl_set_bit(0, 29); 699 __ctl_set_bit(0, 29);
811 clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32; 700 clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
701 old_clock = get_clock();
812 if (set_clock(clock) == 0) { 702 if (set_clock(clock) == 0) {
813 __udelay(1); /* Wait for the clock to start. */ 703 __udelay(1); /* Wait for the clock to start. */
814 __ctl_clear_bit(0, 29); 704 __ctl_clear_bit(0, 29);
@@ -817,16 +707,17 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
817 /* Adjust Linux timing variables. */ 707 /* Adjust Linux timing variables. */
818 delay = (unsigned long long) 708 delay = (unsigned long long)
819 (aib->edf2.etv - sync_port->edf2.etv) << 32; 709 (aib->edf2.etv - sync_port->edf2.etv) << 32;
820 etr_adjust_time(clock, delay); 710 delta = etr_adjust_time(old_clock, clock, delay);
821 setup_jiffy_timer(); 711 etr_sync.fixup_cc = delta;
712 fixup_clock_comparator(delta);
822 /* Verify that the clock is properly set. */ 713 /* Verify that the clock is properly set. */
823 if (!etr_aib_follows(sync_port, aib, port)) { 714 if (!etr_aib_follows(sync_port, aib, port)) {
824 /* Didn't work. */ 715 /* Didn't work. */
825 etr_disable_sync_clock(NULL); 716 etr_disable_sync_clock(NULL);
826 in_sync = -EAGAIN; 717 etr_sync.in_sync = -EAGAIN;
827 rc = -EAGAIN; 718 rc = -EAGAIN;
828 } else { 719 } else {
829 in_sync = 1; 720 etr_sync.in_sync = 1;
830 rc = 0; 721 rc = 0;
831 } 722 }
832 } else { 723 } else {
@@ -834,7 +725,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
834 __ctl_clear_bit(0, 29); 725 __ctl_clear_bit(0, 29);
835 __ctl_clear_bit(14, 21); 726 __ctl_clear_bit(14, 21);
836 etr_disable_sync_clock(NULL); 727 etr_disable_sync_clock(NULL);
837 in_sync = -EAGAIN; 728 etr_sync.in_sync = -EAGAIN;
838 rc = -EAGAIN; 729 rc = -EAGAIN;
839 } 730 }
840 local_irq_enable(); 731 local_irq_enable();
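fixup_clock_comparator() is the consumer of the delta returned by etr_adjust_time(): once the TOD clock has jumped, every cpu's pending wakeup must jump with it or timers would fire early or late. A sketch of what such a fixup presumably does per cpu (not taken from this patch):

static void fixup_clock_comparator_sketch(unsigned long long delta)
{
	/* -1ULL means "no wakeup armed"; only shift a real expiry */
	if (S390_lowcore.clock_comparator == -1ULL)
		return;
	S390_lowcore.clock_comparator += delta;
	set_clock_comparator(S390_lowcore.clock_comparator);
}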
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
new file mode 100644
index 000000000000..12b39b3d9c38
--- /dev/null
+++ b/arch/s390/kernel/topology.c
@@ -0,0 +1,314 @@
1/*
2 * Copyright IBM Corp. 2007
3 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
4 */
5
6#include <linux/kernel.h>
7#include <linux/mm.h>
8#include <linux/init.h>
9#include <linux/device.h>
10#include <linux/bootmem.h>
11#include <linux/sched.h>
12#include <linux/workqueue.h>
13#include <linux/cpu.h>
14#include <linux/smp.h>
15#include <asm/delay.h>
16#include <asm/s390_ext.h>
17#include <asm/sysinfo.h>
18
19#define CPU_BITS 64
20#define NR_MAG 6
21
22#define PTF_HORIZONTAL (0UL)
23#define PTF_VERTICAL (1UL)
24#define PTF_CHECK (2UL)
25
26struct tl_cpu {
27 unsigned char reserved0[4];
28 unsigned char :6;
29 unsigned char pp:2;
30 unsigned char reserved1;
31 unsigned short origin;
32 unsigned long mask[CPU_BITS / BITS_PER_LONG];
33};
34
35struct tl_container {
36 unsigned char reserved[8];
37};
38
39union tl_entry {
40 unsigned char nl;
41 struct tl_cpu cpu;
42 struct tl_container container;
43};
44
45struct tl_info {
46 unsigned char reserved0[2];
47 unsigned short length;
48 unsigned char mag[NR_MAG];
49 unsigned char reserved1;
50 unsigned char mnest;
51 unsigned char reserved2[4];
52 union tl_entry tle[0];
53};
54
55struct core_info {
56 struct core_info *next;
57 cpumask_t mask;
58};
59
60static void topology_work_fn(struct work_struct *work);
61static struct tl_info *tl_info;
62static struct core_info core_info;
63static int machine_has_topology;
64static int machine_has_topology_irq;
65static struct timer_list topology_timer;
66static void set_topology_timer(void);
67static DECLARE_WORK(topology_work, topology_work_fn);
68
69cpumask_t cpu_coregroup_map(unsigned int cpu)
70{
71 struct core_info *core = &core_info;
72 cpumask_t mask;
73
74 cpus_clear(mask);
75 if (!machine_has_topology)
76 return cpu_present_map;
77 mutex_lock(&smp_cpu_state_mutex);
78 while (core) {
79 if (cpu_isset(cpu, core->mask)) {
80 mask = core->mask;
81 break;
82 }
83 core = core->next;
84 }
85 mutex_unlock(&smp_cpu_state_mutex);
86 if (cpus_empty(mask))
87 mask = cpumask_of_cpu(cpu);
88 return mask;
89}
90
91static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
92{
93 unsigned int cpu;
94
95 for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
96 cpu < CPU_BITS;
97 cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1))
98 {
99 unsigned int rcpu, lcpu;
100
101 rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
102 for_each_present_cpu(lcpu) {
103 if (__cpu_logical_map[lcpu] == rcpu) {
104 cpu_set(lcpu, core->mask);
105 smp_cpu_polarization[lcpu] = tl_cpu->pp;
106 }
107 }
108 }
109}
110
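The "CPU_BITS - 1 - cpu" flip above exists because the topology cpu mask numbers its bits MSB-first while find_first_bit()/find_next_bit() count LSB-first; the physical cpu address is that flipped index plus the entry's origin. A worked example with an assumed origin of 8:

#include <stdio.h>

int main(void)
{
	unsigned int origin = 8;	/* assumed value of tl_cpu->origin */
	unsigned int bit;

	for (bit = 60; bit < 64; bit++)	/* bit 63 is the mask's LSB */
		printf("mask bit %2u -> cpu address %u\n",
		       bit, 64 - 1 - bit + origin);	/* 11, 10, 9, 8 */
	return 0;
}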
111static void clear_cores(void)
112{
113 struct core_info *core = &core_info;
114
115 while (core) {
116 cpus_clear(core->mask);
117 core = core->next;
118 }
119}
120
121static union tl_entry *next_tle(union tl_entry *tle)
122{
123 if (tle->nl)
124 return (union tl_entry *)((struct tl_container *)tle + 1);
125 else
126 return (union tl_entry *)((struct tl_cpu *)tle + 1);
127}
128
129static void tl_to_cores(struct tl_info *info)
130{
131 union tl_entry *tle, *end;
132 struct core_info *core = &core_info;
133
134 mutex_lock(&smp_cpu_state_mutex);
135 clear_cores();
136 tle = info->tle;
137 end = (union tl_entry *)((unsigned long)info + info->length);
138 while (tle < end) {
139 switch (tle->nl) {
140 case 5:
141 case 4:
142 case 3:
143 case 2:
144 break;
145 case 1:
146 core = core->next;
147 break;
148 case 0:
149 add_cpus_to_core(&tle->cpu, core);
150 break;
151 default:
152 clear_cores();
153 machine_has_topology = 0;
154 return;
155 }
156 tle = next_tle(tle);
157 }
158 mutex_unlock(&smp_cpu_state_mutex);
159}
160
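tl_to_cores() treats the topology information block as a stream of nesting levels: a level-1 entry opens the next core container, level-0 entries attach cpu masks to the current core, levels 2 through 5 are skipped, and anything else invalidates the data. A toy walk over an assumed level stream:

#include <stdio.h>

int main(void)
{
	int levels[] = { 1, 0, 0, 1, 0 };	/* hypothetical TLE stream */
	int i, core = -1;

	for (i = 0; i < 5; i++) {
		if (levels[i] == 1)
			core++;			/* core = core->next above */
		else
			printf("cpu entry assigned to core %d\n", core);
	}
	return 0;
}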
161static void topology_update_polarization_simple(void)
162{
163 int cpu;
164
165 mutex_lock(&smp_cpu_state_mutex);
166 for_each_present_cpu(cpu)
167 smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
168 mutex_unlock(&smp_cpu_state_mutex);
169}
170
171static int ptf(unsigned long fc)
172{
173 int rc;
174
175 asm volatile(
176 " .insn rre,0xb9a20000,%1,%1\n"
177 " ipm %0\n"
178 " srl %0,28\n"
179 : "=d" (rc)
180 : "d" (fc) : "cc");
181 return rc;
182}
183
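The ipm/srl pair in ptf() is the usual s390 idiom for handing a condition code back to C: ipm deposits the 2-bit cc in bits 28-29 of the 32-bit register view (with the program mask just below it), and a logical shift right by 28 leaves the cc alone in bits 0-1. A plain-C illustration of the extraction, register contents assumed:

#include <stdio.h>

int main(void)
{
	/* pretend ipm stored cc = 2 and program mask = 0xa */
	unsigned int reg = (2u << 28) | (0xau << 24);

	printf("rc = %u\n", reg >> 28);	/* the srl %0,28 step: prints 2 */
	return 0;
}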
184int topology_set_cpu_management(int fc)
185{
186 int cpu;
187 int rc;
188
189 if (!machine_has_topology)
190 return -EOPNOTSUPP;
191 if (fc)
192 rc = ptf(PTF_VERTICAL);
193 else
194 rc = ptf(PTF_HORIZONTAL);
195 if (rc)
196 return -EBUSY;
197 for_each_present_cpu(cpu)
198 smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
199 return rc;
200}
201
202void arch_update_cpu_topology(void)
203{
204 struct tl_info *info = tl_info;
205 struct sys_device *sysdev;
206 int cpu;
207
208 if (!machine_has_topology) {
209 topology_update_polarization_simple();
210 return;
211 }
212 stsi(info, 15, 1, 2);
213 tl_to_cores(info);
214 for_each_online_cpu(cpu) {
215 sysdev = get_cpu_sysdev(cpu);
216 kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
217 }
218}
219
220static void topology_work_fn(struct work_struct *work)
221{
222 arch_reinit_sched_domains();
223}
224
225void topology_schedule_update(void)
226{
227 schedule_work(&topology_work);
228}
229
230static void topology_timer_fn(unsigned long ignored)
231{
232 if (ptf(PTF_CHECK))
233 topology_schedule_update();
234 set_topology_timer();
235}
236
237static void set_topology_timer(void)
238{
239 topology_timer.function = topology_timer_fn;
240 topology_timer.data = 0;
241 topology_timer.expires = jiffies + 60 * HZ;
242 add_timer(&topology_timer);
243}
244
245static void topology_interrupt(__u16 code)
246{
247 schedule_work(&topology_work);
248}
249
250static int __init init_topology_update(void)
251{
252 int rc;
253
254 if (!machine_has_topology) {
255 topology_update_polarization_simple();
256 return 0;
257 }
258 init_timer_deferrable(&topology_timer);
259 if (machine_has_topology_irq) {
260 rc = register_external_interrupt(0x2005, topology_interrupt);
261 if (rc)
262 return rc;
263 ctl_set_bit(0, 8);
264 }
265 else
266 set_topology_timer();
267 return 0;
268}
269__initcall(init_topology_update);
270
271void __init s390_init_cpu_topology(void)
272{
273 unsigned long long facility_bits;
274 struct tl_info *info;
275 struct core_info *core;
276 int nr_cores;
277 int i;
278
279 if (stfle(&facility_bits, 1) <= 0)
280 return;
281 if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
282 return;
283 machine_has_topology = 1;
284
285 if (facility_bits & (1ULL << 51))
286 machine_has_topology_irq = 1;
287
288 tl_info = alloc_bootmem_pages(PAGE_SIZE);
289 if (!tl_info)
290 goto error;
291 info = tl_info;
292 stsi(info, 15, 1, 2);
293
294 nr_cores = info->mag[NR_MAG - 2];
295 for (i = 0; i < info->mnest - 2; i++)
296 nr_cores *= info->mag[NR_MAG - 3 - i];
297
298 printk(KERN_INFO "CPU topology:");
299 for (i = 0; i < NR_MAG; i++)
300 printk(" %d", info->mag[i]);
301 printk(" / %d\n", info->mnest);
302
303 core = &core_info;
304 for (i = 0; i < nr_cores; i++) {
305 core->next = alloc_bootmem(sizeof(struct core_info));
306 core = core->next;
307 if (!core)
308 goto error;
309 }
310 return;
311error:
312 machine_has_topology = 0;
313 machine_has_topology_irq = 0;
314}
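The preallocation count in s390_init_cpu_topology() comes straight from the magnitude vector reported by stsi: start with mag[NR_MAG - 2] and multiply in one more level per extra nesting step. A worked example with assumed stsi output:

#include <stdio.h>

#define NR_MAG 6

int main(void)
{
	unsigned char mag[NR_MAG] = { 0, 0, 0, 2, 4, 8 };	/* assumed */
	int mnest = 3, nr_cores, i;

	nr_cores = mag[NR_MAG - 2];			/* 4 cores per container */
	for (i = 0; i < mnest - 2; i++)
		nr_cores *= mag[NR_MAG - 3 - i];	/* times 2 containers */
	printf("core structs to preallocate: %d\n", nr_cores);	/* 8 */
	return 0;
}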
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 60f728aeaf12..57b607b61100 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -42,11 +42,8 @@
42#include <asm/s390_ext.h> 42#include <asm/s390_ext.h>
43#include <asm/lowcore.h> 43#include <asm/lowcore.h>
44#include <asm/debug.h> 44#include <asm/debug.h>
45#include "entry.h"
45 46
46/* Called from entry.S only */
47extern void handle_per_exception(struct pt_regs *regs);
48
49typedef void pgm_check_handler_t(struct pt_regs *, long);
50pgm_check_handler_t *pgm_check_table[128]; 47pgm_check_handler_t *pgm_check_table[128];
51 48
52#ifdef CONFIG_SYSCTL 49#ifdef CONFIG_SYSCTL
@@ -59,7 +56,6 @@ int sysctl_userprocess_debug = 0;
59 56
60extern pgm_check_handler_t do_protection_exception; 57extern pgm_check_handler_t do_protection_exception;
61extern pgm_check_handler_t do_dat_exception; 58extern pgm_check_handler_t do_dat_exception;
62extern pgm_check_handler_t do_monitor_call;
63extern pgm_check_handler_t do_asce_exception; 59extern pgm_check_handler_t do_asce_exception;
64 60
65#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) 61#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@@ -138,7 +134,6 @@ void show_trace(struct task_struct *task, unsigned long *stack)
138 else 134 else
139 __show_trace(sp, S390_lowcore.thread_info, 135 __show_trace(sp, S390_lowcore.thread_info,
140 S390_lowcore.thread_info + THREAD_SIZE); 136 S390_lowcore.thread_info + THREAD_SIZE);
141 printk("\n");
142 if (!task) 137 if (!task)
143 task = current; 138 task = current;
144 debug_show_held_locks(task); 139 debug_show_held_locks(task);
@@ -166,6 +161,15 @@ void show_stack(struct task_struct *task, unsigned long *sp)
166 show_trace(task, sp); 161 show_trace(task, sp);
167} 162}
168 163
164#ifdef CONFIG_64BIT
165void show_last_breaking_event(struct pt_regs *regs)
166{
167 printk("Last Breaking-Event-Address:\n");
168 printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
169 print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
170}
171#endif
172
169/* 173/*
170 * The architecture-independent dump_stack generator 174 * The architecture-independent dump_stack generator
171 */ 175 */
@@ -739,6 +743,5 @@ void __init trap_init(void)
739 pgm_check_table[0x15] = &operand_exception; 743 pgm_check_table[0x15] = &operand_exception;
740 pgm_check_table[0x1C] = &space_switch_exception; 744 pgm_check_table[0x1C] = &space_switch_exception;
741 pgm_check_table[0x1D] = &hfp_sqrt_exception; 745 pgm_check_table[0x1D] = &hfp_sqrt_exception;
742 pgm_check_table[0x40] = &do_monitor_call;
743 pfault_irq_init(); 746 pfault_irq_init();
744} 747}
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 70f2a862b670..eae21a8ac72d 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -34,7 +34,7 @@ void __delay(unsigned long loops)
34 */ 34 */
35void __udelay(unsigned long usecs) 35void __udelay(unsigned long usecs)
36{ 36{
37 u64 end, time, jiffy_timer = 0; 37 u64 end, time, old_cc = 0;
38 unsigned long flags, cr0, mask, dummy; 38 unsigned long flags, cr0, mask, dummy;
39 int irq_context; 39 int irq_context;
40 40
@@ -43,8 +43,8 @@ void __udelay(unsigned long usecs)
43 local_bh_disable(); 43 local_bh_disable();
44 local_irq_save(flags); 44 local_irq_save(flags);
45 if (raw_irqs_disabled_flags(flags)) { 45 if (raw_irqs_disabled_flags(flags)) {
46 jiffy_timer = S390_lowcore.jiffy_timer; 46 old_cc = S390_lowcore.clock_comparator;
47 S390_lowcore.jiffy_timer = -1ULL - (4096 << 12); 47 S390_lowcore.clock_comparator = -1ULL;
48 __ctl_store(cr0, 0, 0); 48 __ctl_store(cr0, 0, 0);
49 dummy = (cr0 & 0xffff00e0) | 0x00000800; 49 dummy = (cr0 & 0xffff00e0) | 0x00000800;
50 __ctl_load(dummy , 0, 0); 50 __ctl_load(dummy , 0, 0);
@@ -55,8 +55,8 @@ void __udelay(unsigned long usecs)
55 55
56 end = get_clock() + ((u64) usecs << 12); 56 end = get_clock() + ((u64) usecs << 12);
57 do { 57 do {
58 time = end < S390_lowcore.jiffy_timer ? 58 time = end < S390_lowcore.clock_comparator ?
59 end : S390_lowcore.jiffy_timer; 59 end : S390_lowcore.clock_comparator;
60 set_clock_comparator(time); 60 set_clock_comparator(time);
61 trace_hardirqs_on(); 61 trace_hardirqs_on();
62 __load_psw_mask(mask); 62 __load_psw_mask(mask);
@@ -65,10 +65,10 @@ void __udelay(unsigned long usecs)
65 65
66 if (raw_irqs_disabled_flags(flags)) { 66 if (raw_irqs_disabled_flags(flags)) {
67 __ctl_load(cr0, 0, 0); 67 __ctl_load(cr0, 0, 0);
68 S390_lowcore.jiffy_timer = jiffy_timer; 68 S390_lowcore.clock_comparator = old_cc;
69 } 69 }
70 if (!irq_context) 70 if (!irq_context)
71 _local_bh_enable(); 71 _local_bh_enable();
72 set_clock_comparator(S390_lowcore.jiffy_timer); 72 set_clock_comparator(S390_lowcore.clock_comparator);
73 local_irq_restore(flags); 73 local_irq_restore(flags);
74} 74}
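The loop above now keys everything off the per-cpu clock comparator value: the cpu arms the comparator with whichever expires first, the delay end or an already pending wakeup, then sleeps on an enabled wait PSW until it fires. An outline of that structure, with wait_for_clock_interrupt() standing in for the __load_psw_mask() sequence:

	u64 end = get_clock() + ((u64) usecs << 12);	/* 4096 TOD ticks per usec */

	do {
		u64 next = end < S390_lowcore.clock_comparator ?
			   end : S390_lowcore.clock_comparator;
		set_clock_comparator(next);
		wait_for_clock_interrupt();	/* stand-in, see above */
	} while (get_clock() < end);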
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 5efdfe9f5e76..d66215b0fde9 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -302,6 +302,10 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
302 pte_t *pte_from, *pte_to; 302 pte_t *pte_from, *pte_to;
303 int write_user; 303 int write_user;
304 304
305 if (segment_eq(get_fs(), KERNEL_DS)) {
306 memcpy((void __force *) to, (void __force *) from, n);
307 return 0;
308 }
305 done = 0; 309 done = 0;
306retry: 310retry:
307 spin_lock(&mm->page_table_lock); 311 spin_lock(&mm->page_table_lock);
@@ -361,18 +365,10 @@ fault:
361 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ 365 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
362 "m" (*uaddr) : "cc" ); 366 "m" (*uaddr) : "cc" );
363 367
364int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) 368static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
365{ 369{
366 int oldval = 0, newval, ret; 370 int oldval = 0, newval, ret;
367 371
368 spin_lock(&current->mm->page_table_lock);
369 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
370 if (!uaddr) {
371 spin_unlock(&current->mm->page_table_lock);
372 return -EFAULT;
373 }
374 get_page(virt_to_page(uaddr));
375 spin_unlock(&current->mm->page_table_lock);
376 switch (op) { 372 switch (op) {
377 case FUTEX_OP_SET: 373 case FUTEX_OP_SET:
378 __futex_atomic_op("lr %2,%5\n", 374 __futex_atomic_op("lr %2,%5\n",
@@ -397,17 +393,17 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
397 default: 393 default:
398 ret = -ENOSYS; 394 ret = -ENOSYS;
399 } 395 }
400 put_page(virt_to_page(uaddr)); 396 if (ret == 0)
401 *old = oldval; 397 *old = oldval;
402 return ret; 398 return ret;
403} 399}
404 400
405int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) 401int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
406{ 402{
407 int ret; 403 int ret;
408 404
409 if (!current->mm) 405 if (segment_eq(get_fs(), KERNEL_DS))
410 return -EFAULT; 406 return __futex_atomic_op_pt(op, uaddr, oparg, old);
411 spin_lock(&current->mm->page_table_lock); 407 spin_lock(&current->mm->page_table_lock);
412 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); 408 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
413 if (!uaddr) { 409 if (!uaddr) {
@@ -416,13 +412,40 @@ int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
416 } 412 }
417 get_page(virt_to_page(uaddr)); 413 get_page(virt_to_page(uaddr));
418 spin_unlock(&current->mm->page_table_lock); 414 spin_unlock(&current->mm->page_table_lock);
419 asm volatile(" cs %1,%4,0(%5)\n" 415 ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
420 "0: lr %0,%1\n" 416 put_page(virt_to_page(uaddr));
421 "1:\n" 417 return ret;
422 EX_TABLE(0b,1b) 418}
419
420static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
421{
422 int ret;
423
424 asm volatile("0: cs %1,%4,0(%5)\n"
425 "1: lr %0,%1\n"
426 "2:\n"
427 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
423 : "=d" (ret), "+d" (oldval), "=m" (*uaddr) 428 : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
424 : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) 429 : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
425 : "cc", "memory" ); 430 : "cc", "memory" );
431 return ret;
432}
433
434int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
435{
436 int ret;
437
438 if (segment_eq(get_fs(), KERNEL_DS))
439 return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
440 spin_lock(&current->mm->page_table_lock);
441 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
442 if (!uaddr) {
443 spin_unlock(&current->mm->page_table_lock);
444 return -EFAULT;
445 }
446 get_page(virt_to_page(uaddr));
447 spin_unlock(&current->mm->page_table_lock);
448 ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
426 put_page(virt_to_page(uaddr)); 449 put_page(virt_to_page(uaddr));
427 return ret; 450 return ret;
428} 451}
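Both futex paths now share one shape: a __helper that assumes the address is directly usable, and a wrapper that short-circuits for KERNEL_DS callers and otherwise pins the user page around the helper call. The same shape reduced to a standalone toy (all names hypothetical):

#include <stdio.h>

static int kernel_ds = 1;	/* models segment_eq(get_fs(), KERNEL_DS) */

static int __atomic_op(int *addr, int newval, int *old)
{
	*old = *addr;		/* the cs retry loop lives here in the kernel */
	*addr = newval;
	return 0;
}

static int atomic_op(int *addr, int newval, int *old)
{
	if (kernel_ds)
		return __atomic_op(addr, newval, old);
	/* user address: translate, get_page(), call helper, put_page() */
	return -14;		/* -EFAULT placeholder in this toy */
}

int main(void)
{
	int v = 5, old;

	printf("rc=%d old=%d new=%d\n", atomic_op(&v, 7, &old), old, v);
	return 0;
}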
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 880b0ebf894b..ed2af0a3303b 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -289,22 +289,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
289 289
290 rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); 290 rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
291 291
292 switch (rc) { 292 if (rc)
293 case 0:
294 break;
295 case -ENOSPC:
296 PRINT_WARN("segment_load: not loading segment %s - overlaps "
297 "storage/segment\n", name);
298 goto out_free;
299 case -ERANGE:
300 PRINT_WARN("segment_load: not loading segment %s - exceeds "
301 "kernel mapping range\n", name);
302 goto out_free;
303 default:
304 PRINT_WARN("segment_load: not loading segment %s (rc: %d)\n",
305 name, rc);
306 goto out_free; 293 goto out_free;
307 }
308 294
309 seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL); 295 seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL);
310 if (seg->res == NULL) { 296 if (seg->res == NULL) {
@@ -582,8 +568,59 @@ out:
582 mutex_unlock(&dcss_lock); 568 mutex_unlock(&dcss_lock);
583} 569}
584 570
571/*
572 * print appropriate error message for segment_load()/segment_type()
573 * return code
574 */
575void segment_warning(int rc, char *seg_name)
576{
577 switch (rc) {
578 case -ENOENT:
579 PRINT_WARN("cannot load/query segment %s, "
580 "does not exist\n", seg_name);
581 break;
582 case -ENOSYS:
583 PRINT_WARN("cannot load/query segment %s, "
584 "not running on VM\n", seg_name);
585 break;
586 case -EIO:
587 PRINT_WARN("cannot load/query segment %s, "
588 "hardware error\n", seg_name);
589 break;
590 case -ENOTSUPP:
591 PRINT_WARN("cannot load/query segment %s, "
592 "is a multi-part segment\n", seg_name);
593 break;
594 case -ENOSPC:
595 PRINT_WARN("cannot load/query segment %s, "
596 "overlaps with storage\n", seg_name);
597 break;
598 case -EBUSY:
599 PRINT_WARN("cannot load/query segment %s, "
600 "overlaps with already loaded dcss\n", seg_name);
601 break;
602 case -EPERM:
603 PRINT_WARN("cannot load/query segment %s, "
604 "already loaded in incompatible mode\n", seg_name);
605 break;
606 case -ENOMEM:
607 PRINT_WARN("cannot load/query segment %s, "
608 "out of memory\n", seg_name);
609 break;
610 case -ERANGE:
611 PRINT_WARN("cannot load/query segment %s, "
612 "exceeds kernel mapping range\n", seg_name);
613 break;
614 default:
615 PRINT_WARN("cannot load/query segment %s, "
616 "return value %i\n", seg_name, rc);
617 break;
618 }
619}
620
585EXPORT_SYMBOL(segment_load); 621EXPORT_SYMBOL(segment_load);
586EXPORT_SYMBOL(segment_unload); 622EXPORT_SYMBOL(segment_unload);
587EXPORT_SYMBOL(segment_save); 623EXPORT_SYMBOL(segment_save);
588EXPORT_SYMBOL(segment_type); 624EXPORT_SYMBOL(segment_type);
589EXPORT_SYMBOL(segment_modify_shared); 625EXPORT_SYMBOL(segment_modify_shared);
626EXPORT_SYMBOL(segment_warning);
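With the rc switch moved out of __segment_load(), callers are expected to report failures through the new helper. A hypothetical call site (flag name assumed from the extmem interface):

	rc = segment_load("MYSEG", SEGMENT_SHARED, &start, &end);
	if (rc < 0)
		segment_warning(rc, "MYSEG");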
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index ed13d429a487..2650f46001d0 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -28,11 +28,11 @@
28#include <linux/hardirq.h> 28#include <linux/hardirq.h>
29#include <linux/kprobes.h> 29#include <linux/kprobes.h>
30#include <linux/uaccess.h> 30#include <linux/uaccess.h>
31
32#include <asm/system.h> 31#include <asm/system.h>
33#include <asm/pgtable.h> 32#include <asm/pgtable.h>
34#include <asm/s390_ext.h> 33#include <asm/s390_ext.h>
35#include <asm/mmu_context.h> 34#include <asm/mmu_context.h>
35#include "../kernel/entry.h"
36 36
37#ifndef CONFIG_64BIT 37#ifndef CONFIG_64BIT
38#define __FAIL_ADDR_MASK 0x7ffff000 38#define __FAIL_ADDR_MASK 0x7ffff000
@@ -50,8 +50,6 @@
50extern int sysctl_userprocess_debug; 50extern int sysctl_userprocess_debug;
51#endif 51#endif
52 52
53extern void die(const char *,struct pt_regs *,long);
54
55#ifdef CONFIG_KPROBES 53#ifdef CONFIG_KPROBES
56static inline int notify_page_fault(struct pt_regs *regs, long err) 54static inline int notify_page_fault(struct pt_regs *regs, long err)
57{ 55{
@@ -245,11 +243,6 @@ static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
245} 243}
246 244
247#ifdef CONFIG_S390_EXEC_PROTECT 245#ifdef CONFIG_S390_EXEC_PROTECT
248extern long sys_sigreturn(struct pt_regs *regs);
249extern long sys_rt_sigreturn(struct pt_regs *regs);
250extern long sys32_sigreturn(struct pt_regs *regs);
251extern long sys32_rt_sigreturn(struct pt_regs *regs);
252
253static int signal_return(struct mm_struct *mm, struct pt_regs *regs, 246static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
254 unsigned long address, unsigned long error_code) 247 unsigned long address, unsigned long error_code)
255{ 248{
@@ -270,15 +263,15 @@ static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
270#ifdef CONFIG_COMPAT 263#ifdef CONFIG_COMPAT
271 compat = test_tsk_thread_flag(current, TIF_31BIT); 264 compat = test_tsk_thread_flag(current, TIF_31BIT);
272 if (compat && instruction == 0x0a77) 265 if (compat && instruction == 0x0a77)
273 sys32_sigreturn(regs); 266 sys32_sigreturn();
274 else if (compat && instruction == 0x0aad) 267 else if (compat && instruction == 0x0aad)
275 sys32_rt_sigreturn(regs); 268 sys32_rt_sigreturn();
276 else 269 else
277#endif 270#endif
278 if (instruction == 0x0a77) 271 if (instruction == 0x0a77)
279 sys_sigreturn(regs); 272 sys_sigreturn();
280 else if (instruction == 0x0aad) 273 else if (instruction == 0x0aad)
281 sys_rt_sigreturn(regs); 274 sys_rt_sigreturn();
282 else { 275 else {
283 current->thread.prot_addr = address; 276 current->thread.prot_addr = address;
284 current->thread.trap_no = error_code; 277 current->thread.trap_no = error_code;
@@ -424,7 +417,7 @@ no_context:
424} 417}
425 418
426void __kprobes do_protection_exception(struct pt_regs *regs, 419void __kprobes do_protection_exception(struct pt_regs *regs,
427 unsigned long error_code) 420 long error_code)
428{ 421{
429 /* Protection exception is suppressing, decrement psw address. */ 422 /* Protection exception is suppressing, decrement psw address. */
430 regs->psw.addr -= (error_code >> 16); 423 regs->psw.addr -= (error_code >> 16);
@@ -440,7 +433,7 @@ void __kprobes do_protection_exception(struct pt_regs *regs,
440 do_exception(regs, 4, 1); 433 do_exception(regs, 4, 1);
441} 434}
442 435
443void __kprobes do_dat_exception(struct pt_regs *regs, unsigned long error_code) 436void __kprobes do_dat_exception(struct pt_regs *regs, long error_code)
444{ 437{
445 do_exception(regs, error_code & 0xff, 0); 438 do_exception(regs, error_code & 0xff, 0);
446} 439}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 8053245fe259..202c952a29b4 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -50,7 +50,6 @@ void show_mem(void)
50 50
51 printk("Mem-info:\n"); 51 printk("Mem-info:\n");
52 show_free_areas(); 52 show_free_areas();
53 printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
54 i = max_mapnr; 53 i = max_mapnr;
55 while (i-- > 0) { 54 while (i-- > 0) {
56 if (!pfn_valid(i)) 55 if (!pfn_valid(i))
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index 62bf373266f7..4bbdce36b92b 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -5,7 +5,7 @@
5extra-y := head_32.o init_task.o vmlinux.lds 5extra-y := head_32.o init_task.o vmlinux.lds
6 6
7obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \ 7obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \
8 ptrace_32.o semaphore.o setup.o signal_32.o sys_sh.o sys_sh32.o \ 8 ptrace_32.o setup.o signal_32.o sys_sh.o sys_sh32.o \
9 syscalls_32.o time_32.o topology.o traps.o traps_32.o 9 syscalls_32.o time_32.o topology.o traps.o traps_32.o
10 10
11obj-y += cpu/ timers/ 11obj-y += cpu/ timers/
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
index e01283d49cbf..6edf53b93d94 100644
--- a/arch/sh/kernel/Makefile_64
+++ b/arch/sh/kernel/Makefile_64
@@ -1,7 +1,7 @@
1extra-y := head_64.o init_task.o vmlinux.lds 1extra-y := head_64.o init_task.o vmlinux.lds
2 2
3obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \ 3obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \
4 ptrace_64.o semaphore.o setup.o signal_64.o sys_sh.o sys_sh64.o \ 4 ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \
5 syscalls_64.o time_64.o topology.o traps.o traps_64.o 5 syscalls_64.o time_64.o topology.o traps.o traps_64.o
6 6
7obj-y += cpu/ timers/ 7obj-y += cpu/ timers/
diff --git a/arch/sh/kernel/semaphore.c b/arch/sh/kernel/semaphore.c
deleted file mode 100644
index 184119eeae56..000000000000
--- a/arch/sh/kernel/semaphore.c
+++ /dev/null
@@ -1,139 +0,0 @@
1/*
2 * Taken from the alpha implementation.
3 * This may not work well.
4 */
5/*
6 * Generic semaphore code. Buyer beware. Do your own
7 * specific changes in <asm/semaphore-helper.h>
8 */
9
10#include <linux/errno.h>
11#include <linux/sched.h>
12#include <linux/wait.h>
13#include <linux/init.h>
14#include <asm/semaphore.h>
15#include <asm/semaphore-helper.h>
16
17DEFINE_SPINLOCK(semaphore_wake_lock);
18
19/*
20 * Semaphores are implemented using a two-way counter:
21 * The "count" variable is decremented for each process
22 * that tries to sleep, while the "waking" variable is
23 * incremented when the "up()" code goes to wake up waiting
24 * processes.
25 *
26 * Notably, the inline "up()" and "down()" functions can
27 * efficiently test if they need to do any extra work (up
28 * needs to do something only if count was negative before
29 * the increment operation).
30 *
31 * waking_non_zero() (from asm/semaphore.h) must execute
32 * atomically.
33 *
34 * When __up() is called, the count was negative before
35 * incrementing it, and we need to wake up somebody.
36 *
37 * This routine adds one to the count of processes that need to
38 * wake up and exit. ALL waiting processes actually wake up but
39 * only the one that gets to the "waking" field first will gate
40 * through and acquire the semaphore. The others will go back
41 * to sleep.
42 *
43 * Note that these functions are only called when there is
44 * contention on the lock, and as such all this is the
45 * "non-critical" part of the whole semaphore business. The
46 * critical part is the inline stuff in <asm/semaphore.h>
47 * where we want to avoid any extra jumps and calls.
48 */
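A toy model of the count/waking pairing described above: up() banks a wakeup in "waking", and every woken sleeper races to claim one; only the winner acquires the semaphore. Locking is elided, illustration only:

static int waking;

static int waking_non_zero_model(void)
{
	if (waking > 0) {	/* really guarded by semaphore_wake_lock */
		waking--;
		return 1;	/* gate through: semaphore acquired */
	}
	return 0;		/* lost the race: back to sleep */
}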
49void __up(struct semaphore *sem)
50{
51 wake_one_more(sem);
52 wake_up(&sem->wait);
53}
54
55/*
56 * Perform the "down" function. Return zero for semaphore acquired,
57 * return negative for signalled out of the function.
58 *
59 * If called from __down, the return is ignored and the wait loop is
60 * not interruptible. This means that a task waiting on a semaphore
61 * using "down()" cannot be killed until someone does an "up()" on
62 * the semaphore.
63 *
64 * If called from __down_interruptible, the return value gets checked
65 * upon return. If the return value is negative then the task continues
66 * with the negative value in the return register (it can be tested by
67 * the caller).
68 *
69 * Either form may be used in conjunction with "up()".
70 *
71 */
72
73#define DOWN_VAR \
74 struct task_struct *tsk = current; \
75 wait_queue_t wait; \
76 init_waitqueue_entry(&wait, tsk);
77
78#define DOWN_HEAD(task_state) \
79 \
80 \
81 tsk->state = (task_state); \
82 add_wait_queue(&sem->wait, &wait); \
83 \
84 /* \
85 * Ok, we're set up. sem->count is known to be less than zero \
86 * so we must wait. \
87 * \
88 * We can let go the lock for purposes of waiting. \
89 * We re-acquire it after awaking so as to protect \
90 * all semaphore operations. \
91 * \
92 * If "up()" is called before we call waking_non_zero() then \
93 * we will catch it right away. If it is called later then \
94 * we will have to go through a wakeup cycle to catch it. \
95 * \
96 * Multiple waiters contend for the semaphore lock to see \
97 * who gets to gate through and who has to wait some more. \
98 */ \
99 for (;;) {
100
101#define DOWN_TAIL(task_state) \
102 tsk->state = (task_state); \
103 } \
104 tsk->state = TASK_RUNNING; \
105 remove_wait_queue(&sem->wait, &wait);
106
107void __sched __down(struct semaphore * sem)
108{
109 DOWN_VAR
110 DOWN_HEAD(TASK_UNINTERRUPTIBLE)
111 if (waking_non_zero(sem))
112 break;
113 schedule();
114 DOWN_TAIL(TASK_UNINTERRUPTIBLE)
115}
116
117int __sched __down_interruptible(struct semaphore * sem)
118{
119 int ret = 0;
120 DOWN_VAR
121 DOWN_HEAD(TASK_INTERRUPTIBLE)
122
123 ret = waking_non_zero_interruptible(sem, tsk);
124 if (ret)
125 {
126 if (ret == 1)
127 /* ret != 0 only if we get interrupted -arca */
128 ret = 0;
129 break;
130 }
131 schedule();
132 DOWN_TAIL(TASK_INTERRUPTIBLE)
133 return ret;
134}
135
136int __down_trylock(struct semaphore * sem)
137{
138 return waking_non_zero_trylock(sem);
139}
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 45bb333fd9ec..6d405462cee8 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -9,7 +9,6 @@
9#include <linux/pci.h> 9#include <linux/pci.h>
10#include <linux/irq.h> 10#include <linux/irq.h>
11#include <asm/sections.h> 11#include <asm/sections.h>
12#include <asm/semaphore.h>
13#include <asm/processor.h> 12#include <asm/processor.h>
14#include <asm/uaccess.h> 13#include <asm/uaccess.h>
15#include <asm/checksum.h> 14#include <asm/checksum.h>
@@ -48,12 +47,6 @@ EXPORT_SYMBOL(__copy_user);
48EXPORT_SYMBOL(get_vm_area); 47EXPORT_SYMBOL(get_vm_area);
49#endif 48#endif
50 49
51/* semaphore exports */
52EXPORT_SYMBOL(__up);
53EXPORT_SYMBOL(__down);
54EXPORT_SYMBOL(__down_interruptible);
55EXPORT_SYMBOL(__down_trylock);
56
57EXPORT_SYMBOL(__udelay); 50EXPORT_SYMBOL(__udelay);
58EXPORT_SYMBOL(__ndelay); 51EXPORT_SYMBOL(__ndelay);
59EXPORT_SYMBOL(__const_udelay); 52EXPORT_SYMBOL(__const_udelay);
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index b6410ce4bd1d..a310c9707f03 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -16,7 +16,6 @@
16#include <linux/in6.h> 16#include <linux/in6.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/screen_info.h> 18#include <linux/screen_info.h>
19#include <asm/semaphore.h>
20#include <asm/processor.h> 19#include <asm/processor.h>
21#include <asm/uaccess.h> 20#include <asm/uaccess.h>
22#include <asm/checksum.h> 21#include <asm/checksum.h>
@@ -37,9 +36,6 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
37EXPORT_SYMBOL(screen_info); 36EXPORT_SYMBOL(screen_info);
38#endif 37#endif
39 38
40EXPORT_SYMBOL(__down);
41EXPORT_SYMBOL(__down_trylock);
42EXPORT_SYMBOL(__up);
43EXPORT_SYMBOL(__put_user_asm_l); 39EXPORT_SYMBOL(__put_user_asm_l);
44EXPORT_SYMBOL(__get_user_asm_l); 40EXPORT_SYMBOL(__get_user_asm_l);
45EXPORT_SYMBOL(copy_page); 41EXPORT_SYMBOL(copy_page);
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index bf1b15d3f6f5..2712bb166f6f 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -12,7 +12,7 @@ obj-y := entry.o wof.o wuf.o etrap.o rtrap.o traps.o $(IRQ_OBJS) \
12 sys_sparc.o sunos_asm.o systbls.o \ 12 sys_sparc.o sunos_asm.o systbls.o \
13 time.o windows.o cpu.o devices.o sclow.o \ 13 time.o windows.o cpu.o devices.o sclow.o \
14 tadpole.o tick14.o ptrace.o sys_solaris.o \ 14 tadpole.o tick14.o ptrace.o sys_solaris.o \
15 unaligned.o una_asm.o muldiv.o semaphore.o \ 15 unaligned.o una_asm.o muldiv.o \
16 prom.o of_device.o devres.o 16 prom.o of_device.o devres.o
17 17
18devres-y = ../../../kernel/irq/devres.o 18devres-y = ../../../kernel/irq/devres.o
diff --git a/arch/sparc/kernel/semaphore.c b/arch/sparc/kernel/semaphore.c
deleted file mode 100644
index 0c37c1a7cd7e..000000000000
--- a/arch/sparc/kernel/semaphore.c
+++ /dev/null
@@ -1,155 +0,0 @@
1/* $Id: semaphore.c,v 1.7 2001/04/18 21:06:05 davem Exp $ */
2
3/* sparc32 semaphore implementation, based on i386 version */
4
5#include <linux/sched.h>
6#include <linux/errno.h>
7#include <linux/init.h>
8
9#include <asm/semaphore.h>
10
11/*
12 * Semaphores are implemented using a two-way counter:
13 * The "count" variable is decremented for each process
14 * that tries to acquire the semaphore, while the "sleeping"
15 * variable is a count of such acquires.
16 *
17 * Notably, the inline "up()" and "down()" functions can
18 * efficiently test if they need to do any extra work (up
19 * needs to do something only if count was negative before
20 * the increment operation).
21 *
22 * "sleeping" and the contention routine ordering is
23 * protected by the semaphore spinlock.
24 *
25 * Note that these functions are only called when there is
26 * contention on the lock, and as such all this is the
27 * "non-critical" part of the whole semaphore business. The
28 * critical part is the inline stuff in <asm/semaphore.h>
29 * where we want to avoid any extra jumps and calls.
30 */
31
32/*
33 * Logic:
34 * - only on a boundary condition do we need to care. When we go
35 * from a negative count to a non-negative, we wake people up.
36 * - when we go from a non-negative count to a negative do we
37 * (a) synchronize with the "sleeper" count and (b) make sure
38 * that we're on the wakeup list before we synchronize so that
39 * we cannot lose wakeup events.
40 */
41
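A worked trace of the "sleepers" folding described above, modelled non-atomically in plain C (count starts at 1; tasks A, B and C call down(), then A calls up()):

#include <stdio.h>

static int count = 1, sleepers;

static int collapse(void)	/* models the atomic24_add_negative() step */
{
	count += sleepers - 1;
	if (count >= 0) {
		sleepers = 0;
		return 1;	/* acquired */
	}
	sleepers = 1;		/* "us - see -1 above" */
	return 0;		/* keep sleeping */
}

int main(void)
{
	count--;				/* A: fast path, count 0 */
	count--; sleepers++; collapse();	/* B: sleeps, count -1 */
	count--; sleepers++; collapse();	/* C: sleeps, count still -1 */
	count++;				/* A: up(), count 0, wake one */
	printf("woken task acquires: %d, count=%d\n", collapse(), count);
	return 0;
}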
42void __up(struct semaphore *sem)
43{
44 wake_up(&sem->wait);
45}
46
47static DEFINE_SPINLOCK(semaphore_lock);
48
49void __sched __down(struct semaphore * sem)
50{
51 struct task_struct *tsk = current;
52 DECLARE_WAITQUEUE(wait, tsk);
53 tsk->state = TASK_UNINTERRUPTIBLE;
54 add_wait_queue_exclusive(&sem->wait, &wait);
55
56 spin_lock_irq(&semaphore_lock);
57 sem->sleepers++;
58 for (;;) {
59 int sleepers = sem->sleepers;
60
61 /*
62 * Add "everybody else" into it. They aren't
63 * playing, because we own the spinlock.
64 */
65 if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
66 sem->sleepers = 0;
67 break;
68 }
69 sem->sleepers = 1; /* us - see -1 above */
70 spin_unlock_irq(&semaphore_lock);
71
72 schedule();
73 tsk->state = TASK_UNINTERRUPTIBLE;
74 spin_lock_irq(&semaphore_lock);
75 }
76 spin_unlock_irq(&semaphore_lock);
77 remove_wait_queue(&sem->wait, &wait);
78 tsk->state = TASK_RUNNING;
79 wake_up(&sem->wait);
80}
81
82int __sched __down_interruptible(struct semaphore * sem)
83{
84 int retval = 0;
85 struct task_struct *tsk = current;
86 DECLARE_WAITQUEUE(wait, tsk);
87 tsk->state = TASK_INTERRUPTIBLE;
88 add_wait_queue_exclusive(&sem->wait, &wait);
89
90 spin_lock_irq(&semaphore_lock);
91 sem->sleepers ++;
92 for (;;) {
93 int sleepers = sem->sleepers;
94
95 /*
96 * With signals pending, this turns into
97 * the trylock failure case - we won't be
98 * sleeping, and we can't get the lock as
99 * it has contention. Just correct the count
100 * and exit.
101 */
102 if (signal_pending(current)) {
103 retval = -EINTR;
104 sem->sleepers = 0;
105 atomic24_add(sleepers, &sem->count);
106 break;
107 }
108
109 /*
110 * Add "everybody else" into it. They aren't
111 * playing, because we own the spinlock. The
112 * "-1" is because we're still hoping to get
113 * the lock.
114 */
115 if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
116 sem->sleepers = 0;
117 break;
118 }
119 sem->sleepers = 1; /* us - see -1 above */
120 spin_unlock_irq(&semaphore_lock);
121
122 schedule();
123 tsk->state = TASK_INTERRUPTIBLE;
124 spin_lock_irq(&semaphore_lock);
125 }
126 spin_unlock_irq(&semaphore_lock);
127 tsk->state = TASK_RUNNING;
128 remove_wait_queue(&sem->wait, &wait);
129 wake_up(&sem->wait);
130 return retval;
131}
132
133/*
134 * Trylock failed - make sure we correct for
135 * having decremented the count.
136 */
137int __down_trylock(struct semaphore * sem)
138{
139 int sleepers;
140 unsigned long flags;
141
142 spin_lock_irqsave(&semaphore_lock, flags);
143 sleepers = sem->sleepers + 1;
144 sem->sleepers = 0;
145
146 /*
147 * Add "everybody else" and us into it. They aren't
148 * playing, because we own the spinlock.
149 */
150 if (!atomic24_add_negative(sleepers, &sem->count))
151 wake_up(&sem->wait);
152
153 spin_unlock_irqrestore(&semaphore_lock, flags);
154 return 1;
155}
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index c1025e551650..97b1de0e9094 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -107,11 +107,6 @@ EXPORT_SYMBOL(___rw_read_try);
107EXPORT_SYMBOL(___rw_read_exit); 107EXPORT_SYMBOL(___rw_read_exit);
108EXPORT_SYMBOL(___rw_write_enter); 108EXPORT_SYMBOL(___rw_write_enter);
109#endif 109#endif
110/* semaphores */
111EXPORT_SYMBOL(__up);
112EXPORT_SYMBOL(__down);
113EXPORT_SYMBOL(__down_trylock);
114EXPORT_SYMBOL(__down_interruptible);
115 110
116EXPORT_SYMBOL(sparc_valid_addr_bitmap); 111EXPORT_SYMBOL(sparc_valid_addr_bitmap);
117EXPORT_SYMBOL(phys_base); 112EXPORT_SYMBOL(phys_base);
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 1bf5b187de49..459462e80a12 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -10,7 +10,7 @@ extra-y := head.o init_task.o vmlinux.lds
10obj-y := process.o setup.o cpu.o idprom.o \ 10obj-y := process.o setup.o cpu.o idprom.o \
11 traps.o auxio.o una_asm.o sysfs.o iommu.o \ 11 traps.o auxio.o una_asm.o sysfs.o iommu.o \
12 irq.o ptrace.o time.o sys_sparc.o signal.o \ 12 irq.o ptrace.o time.o sys_sparc.o signal.o \
13 unaligned.o central.o pci.o starfire.o semaphore.o \ 13 unaligned.o central.o pci.o starfire.o \
14 power.o sbus.o sparc64_ksyms.o chmc.o \ 14 power.o sbus.o sparc64_ksyms.o chmc.o \
15 visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o 15 visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
16 16
diff --git a/arch/sparc64/kernel/semaphore.c b/arch/sparc64/kernel/semaphore.c
deleted file mode 100644
index 9974a6899551..000000000000
--- a/arch/sparc64/kernel/semaphore.c
+++ /dev/null
@@ -1,254 +0,0 @@
1/* semaphore.c: Sparc64 semaphore implementation.
2 *
3 * This is basically the PPC semaphore scheme ported to use
4 * the sparc64 atomic instructions, so see the PPC code for
5 * credits.
6 */
7
8#include <linux/sched.h>
9#include <linux/errno.h>
10#include <linux/init.h>
11
12/*
13 * Atomically update sem->count.
14 * This does the equivalent of the following:
15 *
16 * old_count = sem->count;
17 * tmp = MAX(old_count, 0) + incr;
18 * sem->count = tmp;
19 * return old_count;
20 */
21static inline int __sem_update_count(struct semaphore *sem, int incr)
22{
23 int old_count, tmp;
24
25 __asm__ __volatile__("\n"
26" ! __sem_update_count old_count(%0) tmp(%1) incr(%4) &sem->count(%3)\n"
27"1: ldsw [%3], %0\n"
28" mov %0, %1\n"
29" cmp %0, 0\n"
30" movl %%icc, 0, %1\n"
31" add %1, %4, %1\n"
32" cas [%3], %0, %1\n"
33" cmp %0, %1\n"
34" membar #StoreLoad | #StoreStore\n"
35" bne,pn %%icc, 1b\n"
36" nop\n"
37 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
38 : "r" (&sem->count), "r" (incr), "m" (sem->count)
39 : "cc");
40
41 return old_count;
42}
43
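The pseudocode in the comment above maps directly onto a compare-and-swap retry loop. A userspace C11 rendering of the same update rule, not part of the patch:

#include <stdatomic.h>

static int sem_update_count(atomic_int *count, int incr)
{
	int old = atomic_load(count);

	/* on failure, old is refreshed with the current value; retry */
	while (!atomic_compare_exchange_weak(count, &old,
					     (old > 0 ? old : 0) + incr))
		;
	return old;
}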
44static void __up(struct semaphore *sem)
45{
46 __sem_update_count(sem, 1);
47 wake_up(&sem->wait);
48}
49
50void up(struct semaphore *sem)
51{
52 /* This atomically does:
53 * old_val = sem->count;
54 * new_val = sem->count + 1;
55 * sem->count = new_val;
56 * if (old_val < 0)
57 * __up(sem);
58 *
59 * The (old_val < 0) test is equivalent to
60 * the more straightforward (new_val <= 0),
61 * but it is easier to test the former because
62 * of how the CAS instruction works.
63 */
64
65 __asm__ __volatile__("\n"
66" ! up sem(%0)\n"
67" membar #StoreLoad | #LoadLoad\n"
68"1: lduw [%0], %%g1\n"
69" add %%g1, 1, %%g7\n"
70" cas [%0], %%g1, %%g7\n"
71" cmp %%g1, %%g7\n"
72" bne,pn %%icc, 1b\n"
73" addcc %%g7, 1, %%g0\n"
74" membar #StoreLoad | #StoreStore\n"
75" ble,pn %%icc, 3f\n"
76" nop\n"
77"2:\n"
78" .subsection 2\n"
79"3: mov %0, %%g1\n"
80" save %%sp, -160, %%sp\n"
81" call %1\n"
82" mov %%g1, %%o0\n"
83" ba,pt %%xcc, 2b\n"
84" restore\n"
85" .previous\n"
86 : : "r" (sem), "i" (__up)
87 : "g1", "g2", "g3", "g7", "memory", "cc");
88}
89
90static void __sched __down(struct semaphore * sem)
91{
92 struct task_struct *tsk = current;
93 DECLARE_WAITQUEUE(wait, tsk);
94
95 tsk->state = TASK_UNINTERRUPTIBLE;
96 add_wait_queue_exclusive(&sem->wait, &wait);
97
98 while (__sem_update_count(sem, -1) <= 0) {
99 schedule();
100 tsk->state = TASK_UNINTERRUPTIBLE;
101 }
102 remove_wait_queue(&sem->wait, &wait);
103 tsk->state = TASK_RUNNING;
104
105 wake_up(&sem->wait);
106}
107
108void __sched down(struct semaphore *sem)
109{
110 might_sleep();
111 /* This atomically does:
112 * old_val = sem->count;
113 * new_val = sem->count - 1;
114 * sem->count = new_val;
115 * if (old_val < 1)
116 * __down(sem);
117 *
118 * The (old_val < 1) test is equivalent to
119 * the more straightforward (new_val < 0),
120 * but it is easier to test the former because
121 * of how the CAS instruction works.
122 */
123
124 __asm__ __volatile__("\n"
125" ! down sem(%0)\n"
126"1: lduw [%0], %%g1\n"
127" sub %%g1, 1, %%g7\n"
128" cas [%0], %%g1, %%g7\n"
129" cmp %%g1, %%g7\n"
130" bne,pn %%icc, 1b\n"
131" cmp %%g7, 1\n"
132" membar #StoreLoad | #StoreStore\n"
133" bl,pn %%icc, 3f\n"
134" nop\n"
135"2:\n"
136" .subsection 2\n"
137"3: mov %0, %%g1\n"
138" save %%sp, -160, %%sp\n"
139" call %1\n"
140" mov %%g1, %%o0\n"
141" ba,pt %%xcc, 2b\n"
142" restore\n"
143" .previous\n"
144 : : "r" (sem), "i" (__down)
145 : "g1", "g2", "g3", "g7", "memory", "cc");
146}
147
148int down_trylock(struct semaphore *sem)
149{
150 int ret;
151
152 /* This atomically does:
153 * old_val = sem->count;
154 * new_val = sem->count - 1;
155 * if (old_val < 1) {
156 * ret = 1;
157 * } else {
158 * sem->count = new_val;
159 * ret = 0;
160 * }
161 *
162 * The (old_val < 1) test is equivalent to
163 * the more straightforward (new_val < 0),
164 * but it is easier to test the former because
165 * of how the CAS instruction works.
166 */
167
168 __asm__ __volatile__("\n"
169" ! down_trylock sem(%1) ret(%0)\n"
170"1: lduw [%1], %%g1\n"
171" sub %%g1, 1, %%g7\n"
172" cmp %%g1, 1\n"
173" bl,pn %%icc, 2f\n"
174" mov 1, %0\n"
175" cas [%1], %%g1, %%g7\n"
176" cmp %%g1, %%g7\n"
177" bne,pn %%icc, 1b\n"
178" mov 0, %0\n"
179" membar #StoreLoad | #StoreStore\n"
180"2:\n"
181 : "=&r" (ret)
182 : "r" (sem)
183 : "g1", "g7", "memory", "cc");
184
185 return ret;
186}
187
188static int __sched __down_interruptible(struct semaphore * sem)
189{
190 int retval = 0;
191 struct task_struct *tsk = current;
192 DECLARE_WAITQUEUE(wait, tsk);
193
194 tsk->state = TASK_INTERRUPTIBLE;
195 add_wait_queue_exclusive(&sem->wait, &wait);
196
197 while (__sem_update_count(sem, -1) <= 0) {
198 if (signal_pending(current)) {
199 __sem_update_count(sem, 0);
200 retval = -EINTR;
201 break;
202 }
203 schedule();
204 tsk->state = TASK_INTERRUPTIBLE;
205 }
206 tsk->state = TASK_RUNNING;
207 remove_wait_queue(&sem->wait, &wait);
208 wake_up(&sem->wait);
209 return retval;
210}
211
212int __sched down_interruptible(struct semaphore *sem)
213{
214 int ret = 0;
215
216 might_sleep();
217 /* This atomically does:
218 * old_val = sem->count;
219 * new_val = sem->count - 1;
220 * sem->count = new_val;
221 * if (old_val < 1)
222 * ret = __down_interruptible(sem);
223 *
224 * The (old_val < 1) test is equivalent to
225 * the more straightforward (new_val < 0),
226 * but it is easier to test the former because
227 * of how the CAS instruction works.
228 */
229
230 __asm__ __volatile__("\n"
231" ! down_interruptible sem(%2) ret(%0)\n"
232"1: lduw [%2], %%g1\n"
233" sub %%g1, 1, %%g7\n"
234" cas [%2], %%g1, %%g7\n"
235" cmp %%g1, %%g7\n"
236" bne,pn %%icc, 1b\n"
237" cmp %%g7, 1\n"
238" membar #StoreLoad | #StoreStore\n"
239" bl,pn %%icc, 3f\n"
240" nop\n"
241"2:\n"
242" .subsection 2\n"
243"3: mov %2, %%g1\n"
244" save %%sp, -160, %%sp\n"
245" call %3\n"
246" mov %%g1, %%o0\n"
247" ba,pt %%xcc, 2b\n"
248" restore\n"
249" .previous\n"
250 : "=r" (ret)
251 : "0" (ret), "r" (sem), "i" (__down_interruptible)
252 : "g1", "g2", "g3", "g7", "memory", "cc");
253 return ret;
254}
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 51fa773f38c9..051b8d9cb989 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -130,12 +130,6 @@ EXPORT_SYMBOL(_mcount);
130 130
131EXPORT_SYMBOL(sparc64_get_clock_tick); 131EXPORT_SYMBOL(sparc64_get_clock_tick);
132 132
133/* semaphores */
134EXPORT_SYMBOL(down);
135EXPORT_SYMBOL(down_trylock);
136EXPORT_SYMBOL(down_interruptible);
137EXPORT_SYMBOL(up);
138
139/* RW semaphores */ 133/* RW semaphores */
140EXPORT_SYMBOL(__down_read); 134EXPORT_SYMBOL(__down_read);
141EXPORT_SYMBOL(__down_read_trylock); 135EXPORT_SYMBOL(__down_read_trylock);
diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386
index 3cd8a04d66d8..e09edfa560da 100644
--- a/arch/um/Kconfig.i386
+++ b/arch/um/Kconfig.i386
@@ -19,10 +19,6 @@ config 64BIT
19 bool 19 bool
20 default n 20 default n
21 21
22config SEMAPHORE_SLEEPERS
23 bool
24 default y
25
26config 3_LEVEL_PGTABLES 22config 3_LEVEL_PGTABLES
27 bool "Three-level pagetables (EXPERIMENTAL)" 23 bool "Three-level pagetables (EXPERIMENTAL)"
28 default n 24 default n
diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
index 6533b349f061..3fbe69e359ed 100644
--- a/arch/um/Kconfig.x86_64
+++ b/arch/um/Kconfig.x86_64
@@ -11,10 +11,6 @@ config RWSEM_GENERIC_SPINLOCK
11 bool 11 bool
12 default y 12 default y
13 13
14config SEMAPHORE_SLEEPERS
15 bool
16 default y
17
18config 3_LEVEL_PGTABLES 14config 3_LEVEL_PGTABLES
19 bool 15 bool
20 default y 16 default y
diff --git a/arch/um/sys-i386/ksyms.c b/arch/um/sys-i386/ksyms.c
index 2a1eac1859ce..bfbefd30db8f 100644
--- a/arch/um/sys-i386/ksyms.c
+++ b/arch/um/sys-i386/ksyms.c
@@ -1,17 +1,5 @@
1#include "linux/module.h" 1#include "linux/module.h"
2#include "linux/in6.h"
3#include "linux/rwsem.h"
4#include "asm/byteorder.h"
5#include "asm/delay.h"
6#include "asm/semaphore.h"
7#include "asm/uaccess.h"
8#include "asm/checksum.h" 2#include "asm/checksum.h"
9#include "asm/errno.h"
10
11EXPORT_SYMBOL(__down_failed);
12EXPORT_SYMBOL(__down_failed_interruptible);
13EXPORT_SYMBOL(__down_failed_trylock);
14EXPORT_SYMBOL(__up_wakeup);
15 3
16/* Networking helper routines. */ 4/* Networking helper routines. */
17EXPORT_SYMBOL(csum_partial); 5EXPORT_SYMBOL(csum_partial);
diff --git a/arch/um/sys-ppc/Makefile b/arch/um/sys-ppc/Makefile
index 08901526e893..b8bc844fd2c4 100644
--- a/arch/um/sys-ppc/Makefile
+++ b/arch/um/sys-ppc/Makefile
@@ -3,7 +3,7 @@ OBJ = built-in.o
3.S.o: 3.S.o:
4 $(CC) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o 4 $(CC) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
5 5
6OBJS = ptrace.o sigcontext.o semaphore.o checksum.o miscthings.o misc.o \ 6OBJS = ptrace.o sigcontext.o checksum.o miscthings.o misc.o \
7 ptrace_user.o sysrq.o 7 ptrace_user.o sysrq.o
8 8
9EXTRA_AFLAGS := -DCONFIG_PPC32 -I. -I$(srctree)/arch/ppc/kernel 9EXTRA_AFLAGS := -DCONFIG_PPC32 -I. -I$(srctree)/arch/ppc/kernel
@@ -20,10 +20,6 @@ ptrace_user.o: ptrace_user.c
20sigcontext.o: sigcontext.c 20sigcontext.o: sigcontext.c
21 $(CC) $(USER_CFLAGS) $(EXTRA_CFLAGS) -c -o $@ $< 21 $(CC) $(USER_CFLAGS) $(EXTRA_CFLAGS) -c -o $@ $<
22 22
23semaphore.c:
24 rm -f $@
25 ln -s $(srctree)/arch/ppc/kernel/$@ $@
26
27checksum.S: 23checksum.S:
28 rm -f $@ 24 rm -f $@
29 ln -s $(srctree)/arch/ppc/lib/$@ $@ 25 ln -s $(srctree)/arch/ppc/lib/$@ $@
@@ -66,4 +62,4 @@ misc.o: misc.S ppc_defs.h
66 $(CC) $(EXTRA_AFLAGS) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o 62 $(CC) $(EXTRA_AFLAGS) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
67 rm -f asm 63 rm -f asm
68 64
69clean-files := $(OBJS) ppc_defs.h checksum.S semaphore.c mk_defs.c 65clean-files := $(OBJS) ppc_defs.h checksum.S mk_defs.c
diff --git a/arch/um/sys-x86_64/ksyms.c b/arch/um/sys-x86_64/ksyms.c
index 12c593607c59..4d7d1a812d8f 100644
--- a/arch/um/sys-x86_64/ksyms.c
+++ b/arch/um/sys-x86_64/ksyms.c
@@ -1,16 +1,5 @@
1#include "linux/module.h" 1#include "linux/module.h"
2#include "linux/in6.h" 2#include "asm/string.h"
3#include "linux/rwsem.h"
4#include "asm/byteorder.h"
5#include "asm/semaphore.h"
6#include "asm/uaccess.h"
7#include "asm/checksum.h"
8#include "asm/errno.h"
9
10EXPORT_SYMBOL(__down_failed);
11EXPORT_SYMBOL(__down_failed_interruptible);
12EXPORT_SYMBOL(__down_failed_trylock);
13EXPORT_SYMBOL(__up_wakeup);
14 3
15/*XXX: we need them because they would be exported by x86_64 */ 4/*XXX: we need them because they would be exported by x86_64 */
16EXPORT_SYMBOL(__memcpy); 5EXPORT_SYMBOL(__memcpy);
diff --git a/arch/v850/kernel/Makefile b/arch/v850/kernel/Makefile
index 3930482bddc4..da5889c53576 100644
--- a/arch/v850/kernel/Makefile
+++ b/arch/v850/kernel/Makefile
@@ -11,7 +11,7 @@
11 11
12extra-y := head.o init_task.o vmlinux.lds 12extra-y := head.o init_task.o vmlinux.lds
13 13
14obj-y += intv.o entry.o process.o syscalls.o time.o semaphore.o setup.o \ 14obj-y += intv.o entry.o process.o syscalls.o time.o setup.o \
15 signal.o irq.o mach.o ptrace.o bug.o 15 signal.o irq.o mach.o ptrace.o bug.o
16obj-$(CONFIG_MODULES) += module.o v850_ksyms.o 16obj-$(CONFIG_MODULES) += module.o v850_ksyms.o
17# chip-specific code 17# chip-specific code
diff --git a/arch/v850/kernel/semaphore.c b/arch/v850/kernel/semaphore.c
deleted file mode 100644
index fc89fd661c99..000000000000
--- a/arch/v850/kernel/semaphore.c
+++ /dev/null
@@ -1,166 +0,0 @@
1/*
2 * arch/v850/kernel/semaphore.c -- Semaphore support
3 *
4 * Copyright (C) 1998-2000 IBM Corporation
5 * Copyright (C) 1999 Linus Torvalds
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * This file is a copy of the s390 version, arch/s390/kernel/semaphore.c
12 * Author(s): Martin Schwidefsky
13 * which was derived from the i386 version, linux/arch/i386/kernel/semaphore.c
14 */
15
16#include <linux/errno.h>
17#include <linux/sched.h>
18#include <linux/init.h>
19
20#include <asm/semaphore.h>
21
22/*
23 * Semaphores are implemented using a two-way counter:
24 * The "count" variable is decremented for each process
25 * that tries to acquire the semaphore, while the "sleeping"
26 * variable is a count of such acquires.
27 *
28 * Notably, the inline "up()" and "down()" functions can
29 * efficiently test if they need to do any extra work (up
30 * needs to do something only if count was negative before
31 * the increment operation.
32 *
33 * "sleeping" and the contention routine ordering is
34 * protected by the semaphore spinlock.
35 *
36 * Note that these functions are only called when there is
37 * contention on the lock, and as such all this is the
38 * "non-critical" part of the whole semaphore business. The
39 * critical part is the inline stuff in <asm/semaphore.h>
40 * where we want to avoid any extra jumps and calls.
41 */
42
43/*
44 * Logic:
45 * - only on a boundary condition do we need to care. When we go
46 * from a negative count to a non-negative, we wake people up.
47 * - when we go from a non-negative count to a negative do we
48 * (a) synchronize with the "sleeper" count and (b) make sure
49 * that we're on the wakeup list before we synchronize so that
50 * we cannot lose wakeup events.
51 */
52
53void __up(struct semaphore *sem)
54{
55 wake_up(&sem->wait);
56}
57
58static DEFINE_SPINLOCK(semaphore_lock);
59
60void __sched __down(struct semaphore * sem)
61{
62 struct task_struct *tsk = current;
63 DECLARE_WAITQUEUE(wait, tsk);
64 tsk->state = TASK_UNINTERRUPTIBLE;
65 add_wait_queue_exclusive(&sem->wait, &wait);
66
67 spin_lock_irq(&semaphore_lock);
68 sem->sleepers++;
69 for (;;) {
70 int sleepers = sem->sleepers;
71
72 /*
73 * Add "everybody else" into it. They aren't
74 * playing, because we own the spinlock.
75 */
76 if (!atomic_add_negative(sleepers - 1, &sem->count)) {
77 sem->sleepers = 0;
78 break;
79 }
80 sem->sleepers = 1; /* us - see -1 above */
81 spin_unlock_irq(&semaphore_lock);
82
83 schedule();
84 tsk->state = TASK_UNINTERRUPTIBLE;
85 spin_lock_irq(&semaphore_lock);
86 }
87 spin_unlock_irq(&semaphore_lock);
88 remove_wait_queue(&sem->wait, &wait);
89 tsk->state = TASK_RUNNING;
90 wake_up(&sem->wait);
91}
92
93int __sched __down_interruptible(struct semaphore * sem)
94{
95 int retval = 0;
96 struct task_struct *tsk = current;
97 DECLARE_WAITQUEUE(wait, tsk);
98 tsk->state = TASK_INTERRUPTIBLE;
99 add_wait_queue_exclusive(&sem->wait, &wait);
100
101 spin_lock_irq(&semaphore_lock);
102 sem->sleepers ++;
103 for (;;) {
104 int sleepers = sem->sleepers;
105
106 /*
107 * With signals pending, this turns into
108 * the trylock failure case - we won't be
109 * sleeping, and we can't get the lock as
110 * it has contention. Just correct the count
111 * and exit.
112 */
113 if (signal_pending(current)) {
114 retval = -EINTR;
115 sem->sleepers = 0;
116 atomic_add(sleepers, &sem->count);
117 break;
118 }
119
120 /*
121 * Add "everybody else" into it. They aren't
122 * playing, because we own the spinlock. The
123 * "-1" is because we're still hoping to get
124 * the lock.
125 */
126 if (!atomic_add_negative(sleepers - 1, &sem->count)) {
127 sem->sleepers = 0;
128 break;
129 }
130 sem->sleepers = 1; /* us - see -1 above */
131 spin_unlock_irq(&semaphore_lock);
132
133 schedule();
134 tsk->state = TASK_INTERRUPTIBLE;
135 spin_lock_irq(&semaphore_lock);
136 }
137 spin_unlock_irq(&semaphore_lock);
138 tsk->state = TASK_RUNNING;
139 remove_wait_queue(&sem->wait, &wait);
140 wake_up(&sem->wait);
141 return retval;
142}
143
144/*
145 * Trylock failed - make sure we correct for
146 * having decremented the count.
147 */
148int __down_trylock(struct semaphore * sem)
149{
150 unsigned long flags;
151 int sleepers;
152
153 spin_lock_irqsave(&semaphore_lock, flags);
154 sleepers = sem->sleepers + 1;
155 sem->sleepers = 0;
156
157 /*
158 * Add "everybody else" and us into it. They aren't
159 * playing, because we own the spinlock.
160 */
161 if (!atomic_add_negative(sleepers, &sem->count))
162 wake_up(&sem->wait);
163
164 spin_unlock_irqrestore(&semaphore_lock, flags);
165 return 1;
166}
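
For reference, the count/sleepers accounting that each of these per-arch files duplicates can be modeled in a few lines of plain C. A minimal single-threaded model of the loop body deleted above (illustration only, not kernel code; the spinlock and the wait queue are elided):

#include <assert.h>
#include <stdio.h>

struct sem_model { int count; int sleepers; };

/* One pass of the contention loop: fold "everybody else" back into
 * count; a non-negative result means we own the semaphore. */
static int try_acquire(struct sem_model *s)
{
	int sleepers = s->sleepers;

	s->count += sleepers - 1;	/* atomic_add_negative(sleepers - 1, &count) */
	if (s->count >= 0) {
		s->sleepers = 0;	/* we got it; nobody left sleeping */
		return 1;
	}
	s->sleepers = 1;		/* us - see the -1 above */
	return 0;
}

int main(void)
{
	/* semaphore held; we just did the failed fast-path decrement */
	struct sem_model s = { .count = -1, .sleepers = 0 };

	s.sleepers++;			/* __down() entry */
	assert(!try_acquire(&s));	/* still negative: go to sleep */
	s.count++;			/* up() returns the unit and wakes us */
	assert(try_acquire(&s));	/* retry after wakeup succeeds */
	printf("count=%d sleepers=%d\n", s.count, s.sleepers);
	return 0;
}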
diff --git a/arch/v850/kernel/v850_ksyms.c b/arch/v850/kernel/v850_ksyms.c
index 93575fdc874d..8d386a5dbc4a 100644
--- a/arch/v850/kernel/v850_ksyms.c
+++ b/arch/v850/kernel/v850_ksyms.c
@@ -11,7 +11,6 @@
11#include <asm/pgalloc.h> 11#include <asm/pgalloc.h>
12#include <asm/irq.h> 12#include <asm/irq.h>
13#include <asm/io.h> 13#include <asm/io.h>
14#include <asm/semaphore.h>
15#include <asm/checksum.h> 14#include <asm/checksum.h>
16#include <asm/current.h> 15#include <asm/current.h>
17 16
@@ -34,12 +33,6 @@ EXPORT_SYMBOL (memset);
34EXPORT_SYMBOL (memcpy); 33EXPORT_SYMBOL (memcpy);
35EXPORT_SYMBOL (memmove); 34EXPORT_SYMBOL (memmove);
36 35
37/* semaphores */
38EXPORT_SYMBOL (__down);
39EXPORT_SYMBOL (__down_interruptible);
40EXPORT_SYMBOL (__down_trylock);
41EXPORT_SYMBOL (__up);
42
43/* 36/*
44 * libgcc functions - functions that are used internally by the 37 * libgcc functions - functions that are used internally by the
45 * compiler... (prototypes are not correct though, but that 38 * compiler... (prototypes are not correct though, but that
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4e32b6f7d31a..701c4a27a731 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -53,9 +53,6 @@ config STACKTRACE_SUPPORT
53config HAVE_LATENCYTOP_SUPPORT 53config HAVE_LATENCYTOP_SUPPORT
54 def_bool y 54 def_bool y
55 55
56config SEMAPHORE_SLEEPERS
57 def_bool y
58
59config FAST_CMPXCHG_LOCAL 56config FAST_CMPXCHG_LOCAL
60 bool 57 bool
61 default y 58 default y
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 061627806a2d..deb43785e923 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -1,13 +1,8 @@
1#include <linux/module.h> 1#include <linux/module.h>
2#include <asm/semaphore.h>
3#include <asm/checksum.h> 2#include <asm/checksum.h>
4#include <asm/desc.h> 3#include <asm/desc.h>
5#include <asm/pgtable.h> 4#include <asm/pgtable.h>
6 5
7EXPORT_SYMBOL(__down_failed);
8EXPORT_SYMBOL(__down_failed_interruptible);
9EXPORT_SYMBOL(__down_failed_trylock);
10EXPORT_SYMBOL(__up_wakeup);
11/* Networking helper routines. */ 6/* Networking helper routines. */
12EXPORT_SYMBOL(csum_partial_copy_generic); 7EXPORT_SYMBOL(csum_partial_copy_generic);
13 8
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index e63d96823a16..58882f9f2637 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -4,7 +4,6 @@
4#include <linux/module.h> 4#include <linux/module.h>
5#include <linux/smp.h> 5#include <linux/smp.h>
6 6
7#include <asm/semaphore.h>
8#include <asm/processor.h> 7#include <asm/processor.h>
9#include <asm/uaccess.h> 8#include <asm/uaccess.h>
10#include <asm/pgtable.h> 9#include <asm/pgtable.h>
@@ -12,11 +11,6 @@
12 11
13EXPORT_SYMBOL(kernel_thread); 12EXPORT_SYMBOL(kernel_thread);
14 13
15EXPORT_SYMBOL(__down_failed);
16EXPORT_SYMBOL(__down_failed_interruptible);
17EXPORT_SYMBOL(__down_failed_trylock);
18EXPORT_SYMBOL(__up_wakeup);
19
20EXPORT_SYMBOL(__get_user_1); 14EXPORT_SYMBOL(__get_user_1);
21EXPORT_SYMBOL(__get_user_2); 15EXPORT_SYMBOL(__get_user_2);
22EXPORT_SYMBOL(__get_user_4); 16EXPORT_SYMBOL(__get_user_4);
diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S
index 3899bd37fdf0..648fe4741782 100644
--- a/arch/x86/lib/semaphore_32.S
+++ b/arch/x86/lib/semaphore_32.S
@@ -30,89 +30,6 @@
30 * value or just clobbered.. 30 * value or just clobbered..
31 */ 31 */
32 .section .sched.text, "ax" 32 .section .sched.text, "ax"
33ENTRY(__down_failed)
34 CFI_STARTPROC
35 FRAME
36 pushl %edx
37 CFI_ADJUST_CFA_OFFSET 4
38 CFI_REL_OFFSET edx,0
39 pushl %ecx
40 CFI_ADJUST_CFA_OFFSET 4
41 CFI_REL_OFFSET ecx,0
42 call __down
43 popl %ecx
44 CFI_ADJUST_CFA_OFFSET -4
45 CFI_RESTORE ecx
46 popl %edx
47 CFI_ADJUST_CFA_OFFSET -4
48 CFI_RESTORE edx
49 ENDFRAME
50 ret
51 CFI_ENDPROC
52 ENDPROC(__down_failed)
53
54ENTRY(__down_failed_interruptible)
55 CFI_STARTPROC
56 FRAME
57 pushl %edx
58 CFI_ADJUST_CFA_OFFSET 4
59 CFI_REL_OFFSET edx,0
60 pushl %ecx
61 CFI_ADJUST_CFA_OFFSET 4
62 CFI_REL_OFFSET ecx,0
63 call __down_interruptible
64 popl %ecx
65 CFI_ADJUST_CFA_OFFSET -4
66 CFI_RESTORE ecx
67 popl %edx
68 CFI_ADJUST_CFA_OFFSET -4
69 CFI_RESTORE edx
70 ENDFRAME
71 ret
72 CFI_ENDPROC
73 ENDPROC(__down_failed_interruptible)
74
75ENTRY(__down_failed_trylock)
76 CFI_STARTPROC
77 FRAME
78 pushl %edx
79 CFI_ADJUST_CFA_OFFSET 4
80 CFI_REL_OFFSET edx,0
81 pushl %ecx
82 CFI_ADJUST_CFA_OFFSET 4
83 CFI_REL_OFFSET ecx,0
84 call __down_trylock
85 popl %ecx
86 CFI_ADJUST_CFA_OFFSET -4
87 CFI_RESTORE ecx
88 popl %edx
89 CFI_ADJUST_CFA_OFFSET -4
90 CFI_RESTORE edx
91 ENDFRAME
92 ret
93 CFI_ENDPROC
94 ENDPROC(__down_failed_trylock)
95
96ENTRY(__up_wakeup)
97 CFI_STARTPROC
98 FRAME
99 pushl %edx
100 CFI_ADJUST_CFA_OFFSET 4
101 CFI_REL_OFFSET edx,0
102 pushl %ecx
103 CFI_ADJUST_CFA_OFFSET 4
104 CFI_REL_OFFSET ecx,0
105 call __up
106 popl %ecx
107 CFI_ADJUST_CFA_OFFSET -4
108 CFI_RESTORE ecx
109 popl %edx
110 CFI_ADJUST_CFA_OFFSET -4
111 CFI_RESTORE edx
112 ENDFRAME
113 ret
114 CFI_ENDPROC
115 ENDPROC(__up_wakeup)
116 33
117/* 34/*
118 * rw spinlock fallbacks 35 * rw spinlock fallbacks
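
These stubs existed only because the inline fast path in <asm/semaphore.h> used a private calling convention: a locked decrement, then on contention a call into the stub, whose sole job was to save the caller-clobbered registers before entering the C slow path. A userspace rendering of that fast-path/slow-path split (hypothetical names, illustration only):

#include <stdatomic.h>
#include <stdio.h>

struct sketch_sem { atomic_int count; };

static void sketch_down_slow(struct sketch_sem *sem)
{
	(void)sem;
	printf("contended: would queue and sleep\n");	/* stand-in for __down() */
}

static void sketch_down(struct sketch_sem *sem)
{
	/* the "lock; decl" fast path; __down_failed merely preserved
	 * %ecx/%edx around this out-of-line call */
	if (atomic_fetch_sub(&sem->count, 1) <= 0)
		sketch_down_slow(sem);
}

int main(void)
{
	struct sketch_sem sem = { 1 };

	sketch_down(&sem);	/* 1 -> 0: uncontended, no call */
	sketch_down(&sem);	/* 0 -> -1: slow path taken */
	return 0;
}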
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
index 8b92d428ab02..e009251d4e9f 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -41,11 +41,6 @@
41 thunk rwsem_downgrade_thunk,rwsem_downgrade_wake 41 thunk rwsem_downgrade_thunk,rwsem_downgrade_wake
42#endif 42#endif
43 43
44 thunk __down_failed,__down
45 thunk_retrax __down_failed_interruptible,__down_interruptible
46 thunk_retrax __down_failed_trylock,__down_trylock
47 thunk __up_wakeup,__up
48
49#ifdef CONFIG_TRACE_IRQFLAGS 44#ifdef CONFIG_TRACE_IRQFLAGS
50 thunk trace_hardirqs_on_thunk,trace_hardirqs_on 45 thunk trace_hardirqs_on_thunk,trace_hardirqs_on
51 thunk trace_hardirqs_off_thunk,trace_hardirqs_off 46 thunk trace_hardirqs_off_thunk,trace_hardirqs_off
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index f582d6a24ec2..7419dbccf027 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -5,7 +5,7 @@
5extra-y := head.o vmlinux.lds 5extra-y := head.o vmlinux.lds
6 6
7 7
8obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \ 8obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \
9 setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \ 9 setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \
10 pci-dma.o init_task.o io.o 10 pci-dma.o init_task.o io.o
11 11
diff --git a/arch/xtensa/kernel/semaphore.c b/arch/xtensa/kernel/semaphore.c
deleted file mode 100644
index 995c6410ae10..000000000000
--- a/arch/xtensa/kernel/semaphore.c
+++ /dev/null
@@ -1,226 +0,0 @@
1/*
2 * arch/xtensa/kernel/semaphore.c
3 *
4 * Generic semaphore code. Buyer beware. Do your own specific changes
5 * in <asm/semaphore-helper.h>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 *
11 * Copyright (C) 2001 - 2005 Tensilica Inc.
12 *
13 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
14 * Chris Zankel <chris@zankel.net>
15 * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
16 * Kevin Chea
17 */
18
19#include <linux/sched.h>
20#include <linux/wait.h>
21#include <linux/init.h>
22#include <asm/semaphore.h>
23#include <asm/errno.h>
24
25/*
26 * These two _must_ execute atomically wrt each other.
27 */
28
29static __inline__ void wake_one_more(struct semaphore * sem)
30{
31 atomic_inc((atomic_t *)&sem->sleepers);
32}
33
34static __inline__ int waking_non_zero(struct semaphore *sem)
35{
36 unsigned long flags;
37 int ret = 0;
38
39 spin_lock_irqsave(&semaphore_wake_lock, flags);
40 if (sem->sleepers > 0) {
41 sem->sleepers--;
42 ret = 1;
43 }
44 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
45 return ret;
46}
47
48/*
49 * waking_non_zero_interruptible:
50 * 1 got the lock
51 * 0 go to sleep
52 * -EINTR interrupted
53 *
54 * We must undo the sem->count down_interruptible() increment while we are
55 * protected by the spinlock in order to make this atomic_inc() atomic with the
56 * atomic_read() in wake_one_more(); otherwise we can race. -arca
57 */
58
59static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
60 struct task_struct *tsk)
61{
62 unsigned long flags;
63 int ret = 0;
64
65 spin_lock_irqsave(&semaphore_wake_lock, flags);
66 if (sem->sleepers > 0) {
67 sem->sleepers--;
68 ret = 1;
69 } else if (signal_pending(tsk)) {
70 atomic_inc(&sem->count);
71 ret = -EINTR;
72 }
73 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
74 return ret;
75}
76
77/*
78 * waking_non_zero_trylock:
79 * 1 failed to lock
80 * 0 got the lock
81 *
82 * We must undo the sem->count down_trylock() increment while we are
83 * protected by the spinlock in order to make this atomic_inc() atomic with the
84 * atomic_read() in wake_one_more(); otherwise we can race. -arca
85 */
86
87static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
88{
89 unsigned long flags;
90 int ret = 1;
91
92 spin_lock_irqsave(&semaphore_wake_lock, flags);
93 if (sem->sleepers <= 0)
94 atomic_inc(&sem->count);
95 else {
96 sem->sleepers--;
97 ret = 0;
98 }
99 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
100 return ret;
101}
102
103DEFINE_SPINLOCK(semaphore_wake_lock);
104
105/*
106 * Semaphores are implemented using a two-way counter:
107 * The "count" variable is decremented for each process
108 * that tries to sleep, while the "waking" variable is
109 * incremented when the "up()" code goes to wake up waiting
110 * processes.
111 *
112 * Notably, the inline "up()" and "down()" functions can
113 * efficiently test if they need to do any extra work (up
114 * needs to do something only if count was negative before
115 * the increment operation).
116 *
117 * waking_non_zero() (from asm/semaphore.h) must execute
118 * atomically.
119 *
120 * When __up() is called, the count was negative before
121 * incrementing it, and we need to wake up somebody.
122 *
123 * This routine adds one to the count of processes that need to
124 * wake up and exit. ALL waiting processes actually wake up but
125 * only the one that gets to the "waking" field first will gate
126 * through and acquire the semaphore. The others will go back
127 * to sleep.
128 *
129 * Note that these functions are only called when there is
130 * contention on the lock, and as such all this is the
131 * "non-critical" part of the whole semaphore business. The
132 * critical part is the inline stuff in <asm/semaphore.h>
133 * where we want to avoid any extra jumps and calls.
134 */
135
136void __up(struct semaphore *sem)
137{
138 wake_one_more(sem);
139 wake_up(&sem->wait);
140}
141
142/*
143 * Perform the "down" function. Return zero for semaphore acquired,
144 * return negative for signalled out of the function.
145 *
146 * If called from __down, the return is ignored and the wait loop is
147 * not interruptible. This means that a task waiting on a semaphore
148 * using "down()" cannot be killed until someone does an "up()" on
149 * the semaphore.
150 *
151 * If called from __down_interruptible, the return value gets checked
152 * upon return. If the return value is negative then the task continues
153 * with the negative value in the return register (it can be tested by
154 * the caller).
155 *
156 * Either form may be used in conjunction with "up()".
157 *
158 */
159
160#define DOWN_VAR \
161 struct task_struct *tsk = current; \
162 wait_queue_t wait; \
163 init_waitqueue_entry(&wait, tsk);
164
165#define DOWN_HEAD(task_state) \
166 \
167 \
168 tsk->state = (task_state); \
169 add_wait_queue(&sem->wait, &wait); \
170 \
171 /* \
172 * Ok, we're set up. sem->count is known to be less than zero \
173 * so we must wait. \
174 * \
175 * We can let go the lock for purposes of waiting. \
176 * We re-acquire it after awaking so as to protect \
177 * all semaphore operations. \
178 * \
179 * If "up()" is called before we call waking_non_zero() then \
180 * we will catch it right away. If it is called later then \
181 * we will have to go through a wakeup cycle to catch it. \
182 * \
183 * Multiple waiters contend for the semaphore lock to see \
184 * who gets to gate through and who has to wait some more. \
185 */ \
186 for (;;) {
187
188#define DOWN_TAIL(task_state) \
189 tsk->state = (task_state); \
190 } \
191 tsk->state = TASK_RUNNING; \
192 remove_wait_queue(&sem->wait, &wait);
193
194void __sched __down(struct semaphore * sem)
195{
196 DOWN_VAR
197 DOWN_HEAD(TASK_UNINTERRUPTIBLE)
198 if (waking_non_zero(sem))
199 break;
200 schedule();
201 DOWN_TAIL(TASK_UNINTERRUPTIBLE)
202}
203
204int __sched __down_interruptible(struct semaphore * sem)
205{
206 int ret = 0;
207 DOWN_VAR
208 DOWN_HEAD(TASK_INTERRUPTIBLE)
209
210 ret = waking_non_zero_interruptible(sem, tsk);
211 if (ret)
212 {
213 if (ret == 1)
214 /* ret != 0 only if we get interrupted -arca */
215 ret = 0;
216 break;
217 }
218 schedule();
219 DOWN_TAIL(TASK_INTERRUPTIBLE)
220 return ret;
221}
222
223int __down_trylock(struct semaphore * sem)
224{
225 return waking_non_zero_trylock(sem);
226}
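
All of these per-arch variants are superseded by the generic kernel/semaphore.c this series introduces, which trades the count/sleepers (or count/waking) dance for one spinlock-protected counter plus a FIFO of waiters. A sketch of its shape (not the verbatim file; sketch_wake_first is a hypothetical stand-in for the wake-up helper):

#include <linux/spinlock.h>
#include <linux/list.h>

struct semaphore_sketch {
	spinlock_t		lock;
	unsigned int		count;
	struct list_head	wait_list;
};

static void sketch_wake_first(struct semaphore_sketch *sem);	/* hands the unit to the head waiter */

static void up_sketch(struct semaphore_sketch *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	if (list_empty(&sem->wait_list))
		sem->count++;			/* no waiters: bank the unit */
	else
		sketch_wake_first(sem);		/* direct hand-off, no lost wakeups */
	spin_unlock_irqrestore(&sem->lock, flags);
}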
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 60dbdb43fb4c..6e52cdd6166f 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -26,7 +26,6 @@
26#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/page.h> 27#include <asm/page.h>
28#include <asm/pgalloc.h> 28#include <asm/pgalloc.h>
29#include <asm/semaphore.h>
30#ifdef CONFIG_BLK_DEV_FD 29#ifdef CONFIG_BLK_DEV_FD
31#include <asm/floppy.h> 30#include <asm/floppy.h>
32#endif 31#endif
@@ -71,14 +70,6 @@ EXPORT_SYMBOL(__umodsi3);
71EXPORT_SYMBOL(__udivdi3); 70EXPORT_SYMBOL(__udivdi3);
72EXPORT_SYMBOL(__umoddi3); 71EXPORT_SYMBOL(__umoddi3);
73 72
74/*
75 * Semaphore operations
76 */
77EXPORT_SYMBOL(__down);
78EXPORT_SYMBOL(__down_interruptible);
79EXPORT_SYMBOL(__down_trylock);
80EXPORT_SYMBOL(__up);
81
82#ifdef CONFIG_NET 73#ifdef CONFIG_NET
83/* 74/*
84 * Networking support 75 * Networking support
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index a697fb6cf050..a498a6cc68fe 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -4,6 +4,8 @@
4 * Copyright (C) 2000 Andrew Henroid 4 * Copyright (C) 2000 Andrew Henroid
5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7 * Copyright (c) 2008 Intel Corporation
8 * Author: Matthew Wilcox <willy@linux.intel.com>
7 * 9 *
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 * 11 *
@@ -37,15 +39,18 @@
37#include <linux/workqueue.h> 39#include <linux/workqueue.h>
38#include <linux/nmi.h> 40#include <linux/nmi.h>
39#include <linux/acpi.h> 41#include <linux/acpi.h>
40#include <acpi/acpi.h>
41#include <asm/io.h>
42#include <acpi/acpi_bus.h>
43#include <acpi/processor.h>
44#include <asm/uaccess.h>
45
46#include <linux/efi.h> 42#include <linux/efi.h>
47#include <linux/ioport.h> 43#include <linux/ioport.h>
48#include <linux/list.h> 44#include <linux/list.h>
45#include <linux/jiffies.h>
46#include <linux/semaphore.h>
47
48#include <asm/io.h>
49#include <asm/uaccess.h>
50
51#include <acpi/acpi.h>
52#include <acpi/acpi_bus.h>
53#include <acpi/processor.h>
49 54
50#define _COMPONENT ACPI_OS_SERVICES 55#define _COMPONENT ACPI_OS_SERVICES
51ACPI_MODULE_NAME("osl"); 56ACPI_MODULE_NAME("osl");
@@ -764,7 +769,6 @@ acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
764{ 769{
765 struct semaphore *sem = NULL; 770 struct semaphore *sem = NULL;
766 771
767
768 sem = acpi_os_allocate(sizeof(struct semaphore)); 772 sem = acpi_os_allocate(sizeof(struct semaphore));
769 if (!sem) 773 if (!sem)
770 return AE_NO_MEMORY; 774 return AE_NO_MEMORY;
@@ -791,12 +795,12 @@ acpi_status acpi_os_delete_semaphore(acpi_handle handle)
791{ 795{
792 struct semaphore *sem = (struct semaphore *)handle; 796 struct semaphore *sem = (struct semaphore *)handle;
793 797
794
795 if (!sem) 798 if (!sem)
796 return AE_BAD_PARAMETER; 799 return AE_BAD_PARAMETER;
797 800
798 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle)); 801 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
799 802
803 BUG_ON(!list_empty(&sem->wait_list));
800 kfree(sem); 804 kfree(sem);
801 sem = NULL; 805 sem = NULL;
802 806
@@ -804,21 +808,15 @@ acpi_status acpi_os_delete_semaphore(acpi_handle handle)
804} 808}
805 809
806/* 810/*
807 * TODO: The kernel doesn't have a 'down_timeout' function -- had to
808 * improvise. The process is to sleep for one scheduler quantum
809 * until the semaphore becomes available. Downside is that this
810 * may result in starvation for timeout-based waits when there's
811 * lots of semaphore activity.
812 *
813 * TODO: Support for units > 1? 811 * TODO: Support for units > 1?
814 */ 812 */
815acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) 813acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
816{ 814{
817 acpi_status status = AE_OK; 815 acpi_status status = AE_OK;
818 struct semaphore *sem = (struct semaphore *)handle; 816 struct semaphore *sem = (struct semaphore *)handle;
817 long jiffies;
819 int ret = 0; 818 int ret = 0;
820 819
821
822 if (!sem || (units < 1)) 820 if (!sem || (units < 1))
823 return AE_BAD_PARAMETER; 821 return AE_BAD_PARAMETER;
824 822
@@ -828,58 +826,14 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
828 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", 826 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
829 handle, units, timeout)); 827 handle, units, timeout));
830 828
831 /* 829 if (timeout == ACPI_WAIT_FOREVER)
832 * This can be called during resume with interrupts off. 830 jiffies = MAX_SCHEDULE_TIMEOUT;
833 * Like boot-time, we should be single threaded and will 831 else
834 * always get the lock if we try -- timeout or not. 832 jiffies = msecs_to_jiffies(timeout);
835 * If this doesn't succeed, then we will oops courtesy of 833
836 * might_sleep() in down(). 834 ret = down_timeout(sem, jiffies);
837 */ 835 if (ret)
838 if (!down_trylock(sem)) 836 status = AE_TIME;
839 return AE_OK;
840
841 switch (timeout) {
842 /*
843 * No Wait:
844 * --------
845 * A zero timeout value indicates that we shouldn't wait - just
846 * acquire the semaphore if available otherwise return AE_TIME
847 * (a.k.a. 'would block').
848 */
849 case 0:
850 if (down_trylock(sem))
851 status = AE_TIME;
852 break;
853
854 /*
855 * Wait Indefinitely:
856 * ------------------
857 */
858 case ACPI_WAIT_FOREVER:
859 down(sem);
860 break;
861
862 /*
863 * Wait w/ Timeout:
864 * ----------------
865 */
866 default:
867 // TODO: A better timeout algorithm?
868 {
869 int i = 0;
870 static const int quantum_ms = 1000 / HZ;
871
872 ret = down_trylock(sem);
873 for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
874 schedule_timeout_interruptible(1);
875 ret = down_trylock(sem);
876 }
877
878 if (ret != 0)
879 status = AE_TIME;
880 }
881 break;
882 }
883 837
884 if (ACPI_FAILURE(status)) { 838 if (ACPI_FAILURE(status)) {
885 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 839 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
@@ -902,7 +856,6 @@ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
902{ 856{
903 struct semaphore *sem = (struct semaphore *)handle; 857 struct semaphore *sem = (struct semaphore *)handle;
904 858
905
906 if (!sem || (units < 1)) 859 if (!sem || (units < 1))
907 return AE_BAD_PARAMETER; 860 return AE_BAD_PARAMETER;
908 861
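
The simplification above leans on down_timeout(), which the generic semaphore adds (0 on acquisition, -ETIME once the timeout elapses); the BUG_ON(!list_empty(&sem->wait_list)) in the delete path likewise only exists because the generic struct semaphore carries a wait_list. A minimal caller mirroring the conversion (sketch, kernel context assumed):

#include <linux/semaphore.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static int sketch_wait(struct semaphore *sem, u16 timeout_ms)
{
	long j = (timeout_ms == 0xffff)		/* ACPI_WAIT_FOREVER */
			? MAX_SCHEDULE_TIMEOUT
			: msecs_to_jiffies(timeout_ms);

	return down_timeout(sem, j);	/* 0 on success, -ETIME on timeout */
}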
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 84cdf9025737..349b6edc5794 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -116,6 +116,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
116 err = -EAGAIN; 116 err = -EAGAIN;
117 if (!bytes_read && (filp->f_flags & O_NONBLOCK)) 117 if (!bytes_read && (filp->f_flags & O_NONBLOCK))
118 goto out; 118 goto out;
119 if (bytes_read < 0) {
120 err = bytes_read;
121 goto out;
122 }
119 123
120 err = -EFAULT; 124 err = -EFAULT;
121 while (bytes_read && size) { 125 while (bytes_read && size) {
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 6b658d84d521..6d2f0c8d419a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -64,6 +64,7 @@ config ZCRYPT
64 tristate "Support for PCI-attached cryptographic adapters" 64 tristate "Support for PCI-attached cryptographic adapters"
65 depends on S390 65 depends on S390
66 select ZCRYPT_MONOLITHIC if ZCRYPT="y" 66 select ZCRYPT_MONOLITHIC if ZCRYPT="y"
67 select HW_RANDOM
67 help 68 help
68 Select this option if you want to use a PCI-attached cryptographic 69 Select this option if you want to use a PCI-attached cryptographic
69 adapter like: 70 adapter like:
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index faa7ce318a6d..a47fe64e5c39 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -467,6 +467,31 @@ static int cm_compare_private_data(u8 *private_data,
467 return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE); 467 return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
468} 468}
469 469
470/*
471 * Trivial helpers to strip endian annotation and compare; the
472 * endianness doesn't actually matter since we just need a stable
473 * order for the RB tree.
474 */
475static int be32_lt(__be32 a, __be32 b)
476{
477 return (__force u32) a < (__force u32) b;
478}
479
480static int be32_gt(__be32 a, __be32 b)
481{
482 return (__force u32) a > (__force u32) b;
483}
484
485static int be64_lt(__be64 a, __be64 b)
486{
487 return (__force u64) a < (__force u64) b;
488}
489
490static int be64_gt(__be64 a, __be64 b)
491{
492 return (__force u64) a > (__force u64) b;
493}
494
470static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv) 495static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
471{ 496{
472 struct rb_node **link = &cm.listen_service_table.rb_node; 497 struct rb_node **link = &cm.listen_service_table.rb_node;
@@ -492,9 +517,9 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
492 link = &(*link)->rb_left; 517 link = &(*link)->rb_left;
493 else if (cm_id_priv->id.device > cur_cm_id_priv->id.device) 518 else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
494 link = &(*link)->rb_right; 519 link = &(*link)->rb_right;
495 else if (service_id < cur_cm_id_priv->id.service_id) 520 else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
496 link = &(*link)->rb_left; 521 link = &(*link)->rb_left;
497 else if (service_id > cur_cm_id_priv->id.service_id) 522 else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
498 link = &(*link)->rb_right; 523 link = &(*link)->rb_right;
499 else if (data_cmp < 0) 524 else if (data_cmp < 0)
500 link = &(*link)->rb_left; 525 link = &(*link)->rb_left;
@@ -527,9 +552,9 @@ static struct cm_id_private * cm_find_listen(struct ib_device *device,
527 node = node->rb_left; 552 node = node->rb_left;
528 else if (device > cm_id_priv->id.device) 553 else if (device > cm_id_priv->id.device)
529 node = node->rb_right; 554 node = node->rb_right;
530 else if (service_id < cm_id_priv->id.service_id) 555 else if (be64_lt(service_id, cm_id_priv->id.service_id))
531 node = node->rb_left; 556 node = node->rb_left;
532 else if (service_id > cm_id_priv->id.service_id) 557 else if (be64_gt(service_id, cm_id_priv->id.service_id))
533 node = node->rb_right; 558 node = node->rb_right;
534 else if (data_cmp < 0) 559 else if (data_cmp < 0)
535 node = node->rb_left; 560 node = node->rb_left;
@@ -552,13 +577,13 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
552 parent = *link; 577 parent = *link;
553 cur_timewait_info = rb_entry(parent, struct cm_timewait_info, 578 cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
554 remote_id_node); 579 remote_id_node);
555 if (remote_id < cur_timewait_info->work.remote_id) 580 if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
556 link = &(*link)->rb_left; 581 link = &(*link)->rb_left;
557 else if (remote_id > cur_timewait_info->work.remote_id) 582 else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
558 link = &(*link)->rb_right; 583 link = &(*link)->rb_right;
559 else if (remote_ca_guid < cur_timewait_info->remote_ca_guid) 584 else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
560 link = &(*link)->rb_left; 585 link = &(*link)->rb_left;
561 else if (remote_ca_guid > cur_timewait_info->remote_ca_guid) 586 else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
562 link = &(*link)->rb_right; 587 link = &(*link)->rb_right;
563 else 588 else
564 return cur_timewait_info; 589 return cur_timewait_info;
@@ -578,13 +603,13 @@ static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
578 while (node) { 603 while (node) {
579 timewait_info = rb_entry(node, struct cm_timewait_info, 604 timewait_info = rb_entry(node, struct cm_timewait_info,
580 remote_id_node); 605 remote_id_node);
581 if (remote_id < timewait_info->work.remote_id) 606 if (be32_lt(remote_id, timewait_info->work.remote_id))
582 node = node->rb_left; 607 node = node->rb_left;
583 else if (remote_id > timewait_info->work.remote_id) 608 else if (be32_gt(remote_id, timewait_info->work.remote_id))
584 node = node->rb_right; 609 node = node->rb_right;
585 else if (remote_ca_guid < timewait_info->remote_ca_guid) 610 else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
586 node = node->rb_left; 611 node = node->rb_left;
587 else if (remote_ca_guid > timewait_info->remote_ca_guid) 612 else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
588 node = node->rb_right; 613 node = node->rb_right;
589 else 614 else
590 return timewait_info; 615 return timewait_info;
@@ -605,13 +630,13 @@ static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
605 parent = *link; 630 parent = *link;
606 cur_timewait_info = rb_entry(parent, struct cm_timewait_info, 631 cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
607 remote_qp_node); 632 remote_qp_node);
608 if (remote_qpn < cur_timewait_info->remote_qpn) 633 if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
609 link = &(*link)->rb_left; 634 link = &(*link)->rb_left;
610 else if (remote_qpn > cur_timewait_info->remote_qpn) 635 else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
611 link = &(*link)->rb_right; 636 link = &(*link)->rb_right;
612 else if (remote_ca_guid < cur_timewait_info->remote_ca_guid) 637 else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
613 link = &(*link)->rb_left; 638 link = &(*link)->rb_left;
614 else if (remote_ca_guid > cur_timewait_info->remote_ca_guid) 639 else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
615 link = &(*link)->rb_right; 640 link = &(*link)->rb_right;
616 else 641 else
617 return cur_timewait_info; 642 return cur_timewait_info;
@@ -635,9 +660,9 @@ static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
635 parent = *link; 660 parent = *link;
636 cur_cm_id_priv = rb_entry(parent, struct cm_id_private, 661 cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
637 sidr_id_node); 662 sidr_id_node);
638 if (remote_id < cur_cm_id_priv->id.remote_id) 663 if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
639 link = &(*link)->rb_left; 664 link = &(*link)->rb_left;
640 else if (remote_id > cur_cm_id_priv->id.remote_id) 665 else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
641 link = &(*link)->rb_right; 666 link = &(*link)->rb_right;
642 else { 667 else {
643 int cmp; 668 int cmp;
@@ -2848,7 +2873,7 @@ static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2848 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID, 2873 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2849 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR)); 2874 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2850 sidr_req_msg->request_id = cm_id_priv->id.local_id; 2875 sidr_req_msg->request_id = cm_id_priv->id.local_id;
2851 sidr_req_msg->pkey = cpu_to_be16(param->path->pkey); 2876 sidr_req_msg->pkey = param->path->pkey;
2852 sidr_req_msg->service_id = param->service_id; 2877 sidr_req_msg->service_id = param->service_id;
2853 2878
2854 if (param->private_data && param->private_data_len) 2879 if (param->private_data && param->private_data_len)
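
The helpers make the trade-off explicit: comparing big-endian values as raw bit patterns gives an order that is stable but not numeric, which is all an rb-tree key needs. A standalone demonstration (plain C; prints "numeric: 1, raw-bit: 0" on a little-endian host):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t a = htonl(1), b = htonl(256);	/* network byte order */

	/* numeric order vs. the opaque bit-pattern order used above */
	printf("numeric: %d, raw-bit: %d\n",
	       ntohl(a) < ntohl(b), a < b);
	return 0;
}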
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d81c156a22b4..671f13738054 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1289,7 +1289,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1289 new_cm_id = rdma_create_id(listen_id->id.event_handler, 1289 new_cm_id = rdma_create_id(listen_id->id.event_handler,
1290 listen_id->id.context, 1290 listen_id->id.context,
1291 RDMA_PS_TCP); 1291 RDMA_PS_TCP);
1292 if (!new_cm_id) { 1292 if (IS_ERR(new_cm_id)) {
1293 ret = -ENOMEM; 1293 ret = -ENOMEM;
1294 goto out; 1294 goto out;
1295 } 1295 }
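
The bug fixed here is the classic ERR_PTR one: rdma_create_id() signals failure with an encoded pointer, never NULL, so the old test could not fire. (The fix still folds every failure into -ENOMEM; PTR_ERR() would preserve the real cause.) The general pattern, as a sketch:

#include <linux/err.h>
#include <rdma/rdma_cm.h>

static int sketch_create(rdma_cm_event_handler handler, void *ctx)
{
	struct rdma_cm_id *id = rdma_create_id(handler, ctx, RDMA_PS_TCP);

	if (IS_ERR(id))			/* failure is ERR_PTR-encoded */
		return PTR_ERR(id);	/* decode it; don't test for NULL */
	/* ... use id ... */
	rdma_destroy_id(id);
	return 0;
}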
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 06d502c06a4d..1286dc1b98b2 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -158,8 +158,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
158#endif 158#endif
159 } 159 }
160 160
161 list_splice(&pool->dirty_list, &unmap_list); 161 list_splice_init(&pool->dirty_list, &unmap_list);
162 INIT_LIST_HEAD(&pool->dirty_list);
163 pool->dirty_len = 0; 162 pool->dirty_len = 0;
164 163
165 spin_unlock_irq(&pool->pool_lock); 164 spin_unlock_irq(&pool->pool_lock);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 15937eb38aae..ca4cf3a511ab 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -614,7 +614,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
614 if (!ctx->cm_id->device) 614 if (!ctx->cm_id->device)
615 goto out; 615 goto out;
616 616
617 resp.node_guid = ctx->cm_id->device->node_guid; 617 resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
618 resp.port_num = ctx->cm_id->port_num; 618 resp.port_num = ctx->cm_id->port_num;
619 switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) { 619 switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
620 case RDMA_TRANSPORT_IB: 620 case RDMA_TRANSPORT_IB:
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index c75eb6c9bd49..2cad8b4b5292 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -81,13 +81,13 @@ struct ib_uverbs_device {
81 81
82struct ib_uverbs_event_file { 82struct ib_uverbs_event_file {
83 struct kref ref; 83 struct kref ref;
84 struct file *file;
85 struct ib_uverbs_file *uverbs_file; 84 struct ib_uverbs_file *uverbs_file;
86 spinlock_t lock; 85 spinlock_t lock;
87 int is_async;
88 wait_queue_head_t poll_wait; 86 wait_queue_head_t poll_wait;
89 struct fasync_struct *async_queue; 87 struct fasync_struct *async_queue;
90 struct list_head event_list; 88 struct list_head event_list;
89 int is_async;
90 int is_closed;
91}; 91};
92 92
93struct ib_uverbs_file { 93struct ib_uverbs_file {
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 495c803fb11d..2c3bff5fe867 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1065,6 +1065,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1065 attr.srq = srq; 1065 attr.srq = srq;
1066 attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; 1066 attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
1067 attr.qp_type = cmd.qp_type; 1067 attr.qp_type = cmd.qp_type;
1068 attr.create_flags = 0;
1068 1069
1069 attr.cap.max_send_wr = cmd.max_send_wr; 1070 attr.cap.max_send_wr = cmd.max_send_wr;
1070 attr.cap.max_recv_wr = cmd.max_recv_wr; 1071 attr.cap.max_recv_wr = cmd.max_recv_wr;
@@ -1462,7 +1463,6 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
1462 next->num_sge = user_wr->num_sge; 1463 next->num_sge = user_wr->num_sge;
1463 next->opcode = user_wr->opcode; 1464 next->opcode = user_wr->opcode;
1464 next->send_flags = user_wr->send_flags; 1465 next->send_flags = user_wr->send_flags;
1465 next->imm_data = (__be32 __force) user_wr->imm_data;
1466 1466
1467 if (is_ud) { 1467 if (is_ud) {
1468 next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah, 1468 next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
@@ -1475,14 +1475,24 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
1475 next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey; 1475 next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
1476 } else { 1476 } else {
1477 switch (next->opcode) { 1477 switch (next->opcode) {
1478 case IB_WR_RDMA_WRITE:
1479 case IB_WR_RDMA_WRITE_WITH_IMM: 1478 case IB_WR_RDMA_WRITE_WITH_IMM:
1479 next->ex.imm_data =
1480 (__be32 __force) user_wr->ex.imm_data;
1481 case IB_WR_RDMA_WRITE:
1480 case IB_WR_RDMA_READ: 1482 case IB_WR_RDMA_READ:
1481 next->wr.rdma.remote_addr = 1483 next->wr.rdma.remote_addr =
1482 user_wr->wr.rdma.remote_addr; 1484 user_wr->wr.rdma.remote_addr;
1483 next->wr.rdma.rkey = 1485 next->wr.rdma.rkey =
1484 user_wr->wr.rdma.rkey; 1486 user_wr->wr.rdma.rkey;
1485 break; 1487 break;
1488 case IB_WR_SEND_WITH_IMM:
1489 next->ex.imm_data =
1490 (__be32 __force) user_wr->ex.imm_data;
1491 break;
1492 case IB_WR_SEND_WITH_INV:
1493 next->ex.invalidate_rkey =
1494 user_wr->ex.invalidate_rkey;
1495 break;
1486 case IB_WR_ATOMIC_CMP_AND_SWP: 1496 case IB_WR_ATOMIC_CMP_AND_SWP:
1487 case IB_WR_ATOMIC_FETCH_AND_ADD: 1497 case IB_WR_ATOMIC_FETCH_AND_ADD:
1488 next->wr.atomic.remote_addr = 1498 next->wr.atomic.remote_addr =
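
The reordered switch depends on deliberate fallthrough: the WITH_IMM case copies imm_data and then drops into the address/rkey handling shared by all RDMA opcodes. The shape, with a hypothetical user-WR struct standing in for the copied request:

#include <rdma/ib_verbs.h>

struct sketch_user_wr {		/* hypothetical stand-in */
	__be32	imm_data;
	u64	remote_addr;
	u32	rkey;
};

static void sketch_fill_rdma(struct ib_send_wr *wr,
			     const struct sketch_user_wr *uwr)
{
	switch (wr->opcode) {
	case IB_WR_RDMA_WRITE_WITH_IMM:
		wr->ex.imm_data = uwr->imm_data;	/* IMM flavor only */
		/* fall through */
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_READ:
		wr->wr.rdma.remote_addr = uwr->remote_addr;
		wr->wr.rdma.rkey = uwr->rkey;
		break;
	default:
		break;
	}
}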
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 7c2ac3905582..f49f94653a96 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -352,7 +352,7 @@ static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
352 struct ib_uverbs_event *entry, *tmp; 352 struct ib_uverbs_event *entry, *tmp;
353 353
354 spin_lock_irq(&file->lock); 354 spin_lock_irq(&file->lock);
355 file->file = NULL; 355 file->is_closed = 1;
356 list_for_each_entry_safe(entry, tmp, &file->event_list, list) { 356 list_for_each_entry_safe(entry, tmp, &file->event_list, list) {
357 if (entry->counter) 357 if (entry->counter)
358 list_del(&entry->obj_list); 358 list_del(&entry->obj_list);
@@ -390,7 +390,7 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
390 return; 390 return;
391 391
392 spin_lock_irqsave(&file->lock, flags); 392 spin_lock_irqsave(&file->lock, flags);
393 if (!file->file) { 393 if (file->is_closed) {
394 spin_unlock_irqrestore(&file->lock, flags); 394 spin_unlock_irqrestore(&file->lock, flags);
395 return; 395 return;
396 } 396 }
@@ -423,7 +423,7 @@ static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
423 unsigned long flags; 423 unsigned long flags;
424 424
425 spin_lock_irqsave(&file->async_file->lock, flags); 425 spin_lock_irqsave(&file->async_file->lock, flags);
426 if (!file->async_file->file) { 426 if (!file->async_file->is_closed) {
427 spin_unlock_irqrestore(&file->async_file->lock, flags); 427 spin_unlock_irqrestore(&file->async_file->lock, flags);
428 return; 428 return;
429 } 429 }
@@ -509,6 +509,7 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
509 ev_file->uverbs_file = uverbs_file; 509 ev_file->uverbs_file = uverbs_file;
510 ev_file->async_queue = NULL; 510 ev_file->async_queue = NULL;
511 ev_file->is_async = is_async; 511 ev_file->is_async = is_async;
512 ev_file->is_closed = 0;
512 513
513 *fd = get_unused_fd(); 514 *fd = get_unused_fd();
514 if (*fd < 0) { 515 if (*fd < 0) {
@@ -516,25 +517,18 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
516 goto err; 517 goto err;
517 } 518 }
518 519
519 filp = get_empty_filp();
520 if (!filp) {
521 ret = -ENFILE;
522 goto err_fd;
523 }
524
525 ev_file->file = filp;
526
527 /* 520 /*
528 * fops_get() can't fail here, because we're coming from a 521 * fops_get() can't fail here, because we're coming from a
529 * system call on a uverbs file, which will already have a 522 * system call on a uverbs file, which will already have a
530 * module reference. 523 * module reference.
531 */ 524 */
532 filp->f_op = fops_get(&uverbs_event_fops); 525 filp = alloc_file(uverbs_event_mnt, dget(uverbs_event_mnt->mnt_root),
533 filp->f_path.mnt = mntget(uverbs_event_mnt); 526 FMODE_READ, fops_get(&uverbs_event_fops));
534 filp->f_path.dentry = dget(uverbs_event_mnt->mnt_root); 527 if (!filp) {
535 filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping; 528 ret = -ENFILE;
536 filp->f_flags = O_RDONLY; 529 goto err_fd;
537 filp->f_mode = FMODE_READ; 530 }
531
538 filp->private_data = ev_file; 532 filp->private_data = ev_file;
539 533
540 return filp; 534 return filp;
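
alloc_file() returns a fully-constructed struct file (or NULL), so the half-initialized file that get_empty_filp() used to leave briefly visible never exists. A generic sketch of the pattern; note a complete version must also drop the dget()/fops_get() references on failure, which is elided here:

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mount.h>

static struct file *sketch_make_file(struct vfsmount *mnt,
				     const struct file_operations *fops,
				     void *priv)
{
	struct file *filp = alloc_file(mnt, dget(mnt->mnt_root),
				       FMODE_READ, fops_get(fops));

	if (!filp)
		return NULL;	/* NB: leaks the refs taken above -- sketch only */
	filp->private_data = priv;
	return filp;
}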
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 86ed8af9c7e6..05042089de6e 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -248,7 +248,9 @@ int ib_modify_srq(struct ib_srq *srq,
248 struct ib_srq_attr *srq_attr, 248 struct ib_srq_attr *srq_attr,
249 enum ib_srq_attr_mask srq_attr_mask) 249 enum ib_srq_attr_mask srq_attr_mask)
250{ 250{
251 return srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL); 251 return srq->device->modify_srq ?
252 srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
253 -ENOSYS;
252} 254}
253EXPORT_SYMBOL(ib_modify_srq); 255EXPORT_SYMBOL(ib_modify_srq);
254 256
@@ -628,6 +630,13 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
628} 630}
629EXPORT_SYMBOL(ib_create_cq); 631EXPORT_SYMBOL(ib_create_cq);
630 632
633int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
634{
635 return cq->device->modify_cq ?
636 cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
637}
638EXPORT_SYMBOL(ib_modify_cq);
639
631int ib_destroy_cq(struct ib_cq *cq) 640int ib_destroy_cq(struct ib_cq *cq)
632{ 641{
633 if (atomic_read(&cq->usecnt)) 642 if (atomic_read(&cq->usecnt))
@@ -672,6 +681,9 @@ struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
672{ 681{
673 struct ib_mr *mr; 682 struct ib_mr *mr;
674 683
684 if (!pd->device->reg_phys_mr)
685 return ERR_PTR(-ENOSYS);
686
675 mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf, 687 mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
676 mr_access_flags, iova_start); 688 mr_access_flags, iova_start);
677 689
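
Both hunks apply the same guard for optional ib_device methods: call through the pointer only if the provider implements it, otherwise report -ENOSYS instead of dereferencing NULL. The new ib_modify_cq() shows the pattern in its smallest form:

#include <rdma/ib_verbs.h>

static int sketch_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	/* absent provider method: a clean errno, not an oops */
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}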
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index f283a9f0c23b..113f3c03c5b5 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -130,10 +130,10 @@ static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
130 tx_desc->status = 0; 130 tx_desc->status = 0;
131 131
132 /* Set TXP_HTXD_UNINIT */ 132 /* Set TXP_HTXD_UNINIT */
133 __raw_writeq(cpu_to_be64(0x1122334455667788ULL), 133 __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
134 (void __iomem *) txp_desc + C2_TXP_ADDR); 134 (void __iomem *) txp_desc + C2_TXP_ADDR);
135 __raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN); 135 __raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
136 __raw_writew(cpu_to_be16(TXP_HTXD_UNINIT), 136 __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
137 (void __iomem *) txp_desc + C2_TXP_FLAGS); 137 (void __iomem *) txp_desc + C2_TXP_FLAGS);
138 138
139 elem->skb = NULL; 139 elem->skb = NULL;
@@ -179,13 +179,13 @@ static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
179 rx_desc->status = 0; 179 rx_desc->status = 0;
180 180
181 /* Set RXP_HRXD_UNINIT */ 181 /* Set RXP_HRXD_UNINIT */
182 __raw_writew(cpu_to_be16(RXP_HRXD_OK), 182 __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
183 (void __iomem *) rxp_desc + C2_RXP_STATUS); 183 (void __iomem *) rxp_desc + C2_RXP_STATUS);
184 __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT); 184 __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
185 __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN); 185 __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
186 __raw_writeq(cpu_to_be64(0x99aabbccddeeffULL), 186 __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
187 (void __iomem *) rxp_desc + C2_RXP_ADDR); 187 (void __iomem *) rxp_desc + C2_RXP_ADDR);
188 __raw_writew(cpu_to_be16(RXP_HRXD_UNINIT), 188 __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
189 (void __iomem *) rxp_desc + C2_RXP_FLAGS); 189 (void __iomem *) rxp_desc + C2_RXP_FLAGS);
190 190
191 elem->skb = NULL; 191 elem->skb = NULL;
@@ -239,10 +239,11 @@ static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
239 rxp_hdr->flags = RXP_HRXD_READY; 239 rxp_hdr->flags = RXP_HRXD_READY;
240 240
241 __raw_writew(0, elem->hw_desc + C2_RXP_STATUS); 241 __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
242 __raw_writew(cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)), 242 __raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
243 elem->hw_desc + C2_RXP_LEN); 243 elem->hw_desc + C2_RXP_LEN);
244 __raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR); 244 __raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
245 __raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS); 245 __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
246 elem->hw_desc + C2_RXP_FLAGS);
246 247
247 elem->skb = skb; 248 elem->skb = skb;
248 elem->mapaddr = mapaddr; 249 elem->mapaddr = mapaddr;
@@ -290,9 +291,9 @@ static void c2_rx_clean(struct c2_port *c2_port)
290 __raw_writew(0, elem->hw_desc + C2_RXP_STATUS); 291 __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
291 __raw_writew(0, elem->hw_desc + C2_RXP_COUNT); 292 __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
292 __raw_writew(0, elem->hw_desc + C2_RXP_LEN); 293 __raw_writew(0, elem->hw_desc + C2_RXP_LEN);
293 __raw_writeq(cpu_to_be64(0x99aabbccddeeffULL), 294 __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
294 elem->hw_desc + C2_RXP_ADDR); 295 elem->hw_desc + C2_RXP_ADDR);
295 __raw_writew(cpu_to_be16(RXP_HRXD_UNINIT), 296 __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
296 elem->hw_desc + C2_RXP_FLAGS); 297 elem->hw_desc + C2_RXP_FLAGS);
297 298
298 if (elem->skb) { 299 if (elem->skb) {
@@ -346,16 +347,16 @@ static void c2_tx_clean(struct c2_port *c2_port)
346 elem->hw_desc + C2_TXP_LEN); 347 elem->hw_desc + C2_TXP_LEN);
347 __raw_writeq(0, 348 __raw_writeq(0,
348 elem->hw_desc + C2_TXP_ADDR); 349 elem->hw_desc + C2_TXP_ADDR);
349 __raw_writew(cpu_to_be16(TXP_HTXD_DONE), 350 __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
350 elem->hw_desc + C2_TXP_FLAGS); 351 elem->hw_desc + C2_TXP_FLAGS);
351 c2_port->netstats.tx_dropped++; 352 c2_port->netstats.tx_dropped++;
352 break; 353 break;
353 } else { 354 } else {
354 __raw_writew(0, 355 __raw_writew(0,
355 elem->hw_desc + C2_TXP_LEN); 356 elem->hw_desc + C2_TXP_LEN);
356 __raw_writeq(cpu_to_be64(0x1122334455667788ULL), 357 __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
357 elem->hw_desc + C2_TXP_ADDR); 358 elem->hw_desc + C2_TXP_ADDR);
358 __raw_writew(cpu_to_be16(TXP_HTXD_UNINIT), 359 __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
359 elem->hw_desc + C2_TXP_FLAGS); 360 elem->hw_desc + C2_TXP_FLAGS);
360 } 361 }
361 362
@@ -390,7 +391,7 @@ static void c2_tx_interrupt(struct net_device *netdev)
390 for (elem = tx_ring->to_clean; elem != tx_ring->to_use; 391 for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
391 elem = elem->next) { 392 elem = elem->next) {
392 txp_htxd.flags = 393 txp_htxd.flags =
393 be16_to_cpu(readw(elem->hw_desc + C2_TXP_FLAGS)); 394 be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));
394 395
395 if (txp_htxd.flags != TXP_HTXD_DONE) 396 if (txp_htxd.flags != TXP_HTXD_DONE)
396 break; 397 break;
@@ -398,7 +399,7 @@ static void c2_tx_interrupt(struct net_device *netdev)
398 if (netif_msg_tx_done(c2_port)) { 399 if (netif_msg_tx_done(c2_port)) {
399 /* PCI reads are expensive in fast path */ 400 /* PCI reads are expensive in fast path */
400 txp_htxd.len = 401 txp_htxd.len =
401 be16_to_cpu(readw(elem->hw_desc + C2_TXP_LEN)); 402 be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
402 pr_debug("%s: tx done slot %3Zu status 0x%x len " 403 pr_debug("%s: tx done slot %3Zu status 0x%x len "
403 "%5u bytes\n", 404 "%5u bytes\n",
404 netdev->name, elem - tx_ring->start, 405 netdev->name, elem - tx_ring->start,
@@ -448,10 +449,12 @@ static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
448 /* Write the descriptor to the adapter's rx ring */ 449 /* Write the descriptor to the adapter's rx ring */
449 __raw_writew(0, elem->hw_desc + C2_RXP_STATUS); 450 __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
450 __raw_writew(0, elem->hw_desc + C2_RXP_COUNT); 451 __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
451 __raw_writew(cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)), 452 __raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
452 elem->hw_desc + C2_RXP_LEN); 453 elem->hw_desc + C2_RXP_LEN);
453 __raw_writeq(cpu_to_be64(elem->mapaddr), elem->hw_desc + C2_RXP_ADDR); 454 __raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
454 __raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS); 455 elem->hw_desc + C2_RXP_ADDR);
456 __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
457 elem->hw_desc + C2_RXP_FLAGS);
455 458
456 pr_debug("packet dropped\n"); 459 pr_debug("packet dropped\n");
457 c2_port->netstats.rx_dropped++; 460 c2_port->netstats.rx_dropped++;
@@ -653,7 +656,7 @@ static int c2_up(struct net_device *netdev)
653 i++, elem++) { 656 i++, elem++) {
654 rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data; 657 rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
655 rxp_hdr->flags = 0; 658 rxp_hdr->flags = 0;
656 __raw_writew(cpu_to_be16(RXP_HRXD_READY), 659 __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
657 elem->hw_desc + C2_RXP_FLAGS); 660 elem->hw_desc + C2_RXP_FLAGS);
658 } 661 }
659 662
@@ -787,9 +790,12 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
787 elem->maplen = maplen; 790 elem->maplen = maplen;
788 791
789 /* Tell HW to xmit */ 792 /* Tell HW to xmit */
790 __raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_TXP_ADDR); 793 __raw_writeq((__force u64) cpu_to_be64(mapaddr),
791 __raw_writew(cpu_to_be16(maplen), elem->hw_desc + C2_TXP_LEN); 794 elem->hw_desc + C2_TXP_ADDR);
792 __raw_writew(cpu_to_be16(TXP_HTXD_READY), elem->hw_desc + C2_TXP_FLAGS); 795 __raw_writew((__force u16) cpu_to_be16(maplen),
796 elem->hw_desc + C2_TXP_LEN);
797 __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
798 elem->hw_desc + C2_TXP_FLAGS);
793 799
794 c2_port->netstats.tx_packets++; 800 c2_port->netstats.tx_packets++;
795 c2_port->netstats.tx_bytes += maplen; 801 c2_port->netstats.tx_bytes += maplen;
@@ -810,11 +816,11 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
810 elem->maplen = maplen; 816 elem->maplen = maplen;
811 817
812 /* Tell HW to xmit */ 818 /* Tell HW to xmit */
813 __raw_writeq(cpu_to_be64(mapaddr), 819 __raw_writeq((__force u64) cpu_to_be64(mapaddr),
814 elem->hw_desc + C2_TXP_ADDR); 820 elem->hw_desc + C2_TXP_ADDR);
815 __raw_writew(cpu_to_be16(maplen), 821 __raw_writew((__force u16) cpu_to_be16(maplen),
816 elem->hw_desc + C2_TXP_LEN); 822 elem->hw_desc + C2_TXP_LEN);
817 __raw_writew(cpu_to_be16(TXP_HTXD_READY), 823 __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
818 elem->hw_desc + C2_TXP_FLAGS); 824 elem->hw_desc + C2_TXP_FLAGS);
819 825
820 c2_port->netstats.tx_packets++; 826 c2_port->netstats.tx_packets++;
@@ -1005,7 +1011,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
1005 /* Remap the adapter PCI registers in BAR4 */ 1011 /* Remap the adapter PCI registers in BAR4 */
1006 mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET, 1012 mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
1007 sizeof(struct c2_adapter_pci_regs)); 1013 sizeof(struct c2_adapter_pci_regs));
1008 if (mmio_regs == 0UL) { 1014 if (!mmio_regs) {
1009 printk(KERN_ERR PFX 1015 printk(KERN_ERR PFX
1010 "Unable to remap adapter PCI registers in BAR4\n"); 1016 "Unable to remap adapter PCI registers in BAR4\n");
1011 ret = -EIO; 1017 ret = -EIO;
@@ -1029,10 +1035,10 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
1029 } 1035 }
1030 1036
1031 /* Validate the adapter version */ 1037 /* Validate the adapter version */
1032 if (be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) { 1038 if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
1033 printk(KERN_ERR PFX "Version mismatch " 1039 printk(KERN_ERR PFX "Version mismatch "
1034 "[fw=%u, c2=%u], Adapter not claimed\n", 1040 "[fw=%u, c2=%u], Adapter not claimed\n",
1035 be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)), 1041 be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
1036 C2_VERSION); 1042 C2_VERSION);
1037 ret = -EINVAL; 1043 ret = -EINVAL;
1038 iounmap(mmio_regs); 1044 iounmap(mmio_regs);
@@ -1040,12 +1046,12 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
1040 } 1046 }
1041 1047
1042 /* Validate the adapter IVN */ 1048 /* Validate the adapter IVN */
1043 if (be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) { 1049 if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
1044 printk(KERN_ERR PFX "Downlevel FIrmware level. You should be using " 1050 printk(KERN_ERR PFX "Downlevel FIrmware level. You should be using "
1045 "the OpenIB device support kit. " 1051 "the OpenIB device support kit. "
1046 "[fw=0x%x, c2=0x%x], Adapter not claimed\n", 1052 "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
1047 be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)), 1053 be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
1048 C2_IVN); 1054 C2_IVN);
1049 ret = -EINVAL; 1055 ret = -EINVAL;
1050 iounmap(mmio_regs); 1056 iounmap(mmio_regs);
1051 goto bail2; 1057 goto bail2;
@@ -1068,7 +1074,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
1068 1074
1069 /* Get the last RX index */ 1075 /* Get the last RX index */
1070 c2dev->cur_rx = 1076 c2dev->cur_rx =
1071 (be32_to_cpu(readl(mmio_regs + C2_REGS_HRX_CUR)) - 1077 (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
1072 0xffffc000) / sizeof(struct c2_rxp_desc); 1078 0xffffc000) / sizeof(struct c2_rxp_desc);
1073 1079
1074 /* Request an interrupt line for the driver */ 1080 /* Request an interrupt line for the driver */
@@ -1090,7 +1096,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
1090 } 1096 }
1091 1097
1092 /* Save off the actual size prior to unmapping mmio_regs */ 1098 /* Save off the actual size prior to unmapping mmio_regs */
1093 kva_map_size = be32_to_cpu(readl(mmio_regs + C2_REGS_PCI_WINSIZE)); 1099 kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));
1094 1100
1095 /* Unmap the adapter PCI registers in BAR4 */ 1101 /* Unmap the adapter PCI registers in BAR4 */
1096 iounmap(mmio_regs); 1102 iounmap(mmio_regs);
@@ -1109,7 +1115,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
1109 /* Remap the adapter HRXDQ PA space to kernel VA space */ 1115 /* Remap the adapter HRXDQ PA space to kernel VA space */
1110 c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET, 1116 c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
1111 C2_RXP_HRXDQ_SIZE); 1117 C2_RXP_HRXDQ_SIZE);
1112 if (c2dev->mmio_rxp_ring == 0UL) { 1118 if (!c2dev->mmio_rxp_ring) {
1113 printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n"); 1119 printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
1114 ret = -EIO; 1120 ret = -EIO;
1115 goto bail6; 1121 goto bail6;
@@ -1118,7 +1124,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
1118 /* Remap the adapter HTXDQ PA space to kernel VA space */ 1124 /* Remap the adapter HTXDQ PA space to kernel VA space */
1119 c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET, 1125 c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
1120 C2_TXP_HTXDQ_SIZE); 1126 C2_TXP_HTXDQ_SIZE);
1121 if (c2dev->mmio_txp_ring == 0UL) { 1127 if (!c2dev->mmio_txp_ring) {
1122 printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n"); 1128 printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
1123 ret = -EIO; 1129 ret = -EIO;
1124 goto bail7; 1130 goto bail7;
@@ -1129,7 +1135,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
1129 1135
1130 /* Remap the PCI registers in adapter BAR0 to kernel VA space */ 1136 /* Remap the PCI registers in adapter BAR0 to kernel VA space */
1131 c2dev->regs = ioremap_nocache(reg0_start, reg0_len); 1137 c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
1132 if (c2dev->regs == 0UL) { 1138 if (!c2dev->regs) {
1133 printk(KERN_ERR PFX "Unable to remap BAR0\n"); 1139 printk(KERN_ERR PFX "Unable to remap BAR0\n");
1134 ret = -EIO; 1140 ret = -EIO;
1135 goto bail8; 1141 goto bail8;
@@ -1139,7 +1145,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
1139 c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET; 1145 c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
1140 c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET, 1146 c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
1141 kva_map_size); 1147 kva_map_size);
1142 if (c2dev->kva == 0UL) { 1148 if (!c2dev->kva) {
1143 printk(KERN_ERR PFX "Unable to remap BAR4\n"); 1149 printk(KERN_ERR PFX "Unable to remap BAR4\n");
1144 ret = -EIO; 1150 ret = -EIO;
1145 goto bail9; 1151 goto bail9;
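A note on the casts introduced above: readl() returns a plain u32, while be32_to_cpu() expects a __be32, so the (__force __be32) cast exists only to satisfy sparse's endianness checker (run with make C=1 CF=-D__CHECK_ENDIAN__) and generates no code. A minimal userspace sketch of the same annotation pattern follows; the type and helper names are stand-ins for illustration, not the kernel's own headers, and ntohl() stands in for be32_to_cpu().

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* ntohl()/htonl() stand in for be32_to_cpu()/cpu_to_be32() */

#ifdef __CHECKER__		/* defined only when sparse is analyzing the code */
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef uint32_t __bitwise be32;	/* "these bits are big-endian" */

/* A register read hands back raw bits as uint32_t; the __force cast tells
 * the checker those bits are really big-endian, so converting them is fine
 * and forgetting to convert them becomes a warning. */
static uint32_t reg_read_be32(const volatile uint32_t *reg)
{
	be32 raw = (__force be32) *reg;
	return ntohl((__force uint32_t) raw);
}

int main(void)
{
	uint32_t fake_reg = htonl(0xc0de);	/* device registers hold BE data */
	printf("0x%x\n", (unsigned) reg_read_be32(&fake_reg));	/* prints 0xc0de */
	return 0;
}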
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index fa58200217a1..ed38ab8d9c0c 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -346,7 +346,7 @@ struct c2_dev {
 	// spinlock_t aeq_lock;
 	// spinlock_t rnic_lock;
 
-	u16 *hint_count;
+	__be16 *hint_count;
 	dma_addr_t hint_count_dma;
 	u16 hints_read;
 
@@ -425,10 +425,10 @@ static inline void __raw_writeq(u64 val, void __iomem * addr)
 #endif
 
 #define C2_SET_CUR_RX(c2dev, cur_rx) \
-	__raw_writel(cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
+	__raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
 
 #define C2_GET_CUR_RX(c2dev) \
-	be32_to_cpu(readl(c2dev->mmio_txp_ring + 4092))
+	be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))
 
 static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
 {
@@ -485,8 +485,8 @@ extern void c2_unregister_device(struct c2_dev *c2dev);
 extern int c2_rnic_init(struct c2_dev *c2dev);
 extern void c2_rnic_term(struct c2_dev *c2dev);
 extern void c2_rnic_interrupt(struct c2_dev *c2dev);
-extern int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask);
-extern int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask);
+extern int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
+extern int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
 
 /* QPs */
 extern int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
@@ -545,7 +545,7 @@ extern void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);
 extern int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
 			     struct sp_chunk **root);
 extern void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
-extern u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
+extern __be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
 			  dma_addr_t *dma_addr, gfp_t gfp_mask);
-extern void c2_free_mqsp(u16 * mqsp);
+extern void c2_free_mqsp(__be16* mqsp);
 #endif
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
index a31439bd3b67..62af74295dbe 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -61,7 +61,7 @@ static int c2_convert_cm_status(u32 c2_status)
 	default:
 		printk(KERN_ERR PFX
 		       "%s - Unable to convert CM status: %d\n",
-		       __FUNCTION__, c2_status);
+		       __func__, c2_status);
 		return -EIO;
 	}
 }
@@ -193,9 +193,9 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
 	pr_debug("%s: event = %s, user_context=%llx, "
 		 "resource_type=%x, "
 		 "resource=%x, qp_state=%s\n",
-		 __FUNCTION__,
+		 __func__,
 		 to_event_str(event_id),
-		 (unsigned long long) be64_to_cpu(wr->ae.ae_generic.user_context),
+		 (unsigned long long) wr->ae.ae_generic.user_context,
 		 be32_to_cpu(wr->ae.ae_generic.resource_type),
 		 be32_to_cpu(wr->ae.ae_generic.resource),
 		 to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));
@@ -259,7 +259,7 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
 		BUG_ON(1);
 		pr_debug("%s:%d Unexpected event_id=%d on QP=%p, "
 			 "CM_ID=%p\n",
-			 __FUNCTION__, __LINE__,
+			 __func__, __LINE__,
 			 event_id, qp, cm_id);
 		break;
 	}
@@ -276,7 +276,7 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
 		pr_debug("C2_RES_IND_EP event_id=%d\n", event_id);
 		if (event_id != CCAE_CONNECTION_REQUEST) {
 			pr_debug("%s: Invalid event_id: %d\n",
-				 __FUNCTION__, event_id);
+				 __func__, event_id);
 			break;
 		}
 		cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
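The __FUNCTION__ to __func__ conversions here and in the files below replace a GCC extension with the predefined identifier that C99 standardized; both evaluate to the enclosing function's name. A trivial standalone illustration:

#include <stdio.h>

static void report(void)
{
	/* __func__ is a C99 predefined identifier, not a macro. */
	printf("%s:%d reached\n", __func__, __LINE__);
}

int main(void)
{
	report();	/* prints something like "report:7 reached" */
	return 0;
}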
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index 0315f99e4191..e9110163aeff 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -87,8 +87,8 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
 	}
 }
 
-u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
+__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
 		   dma_addr_t *dma_addr, gfp_t gfp_mask)
 {
 	u16 mqsp;
 
@@ -113,14 +113,14 @@ u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
 		*dma_addr = head->dma_addr +
 			    ((unsigned long) &(head->shared_ptr[mqsp]) -
 			     (unsigned long) head);
-		pr_debug("%s addr %p dma_addr %llx\n", __FUNCTION__,
+		pr_debug("%s addr %p dma_addr %llx\n", __func__,
 			 &(head->shared_ptr[mqsp]), (unsigned long long) *dma_addr);
-		return &(head->shared_ptr[mqsp]);
+		return (__force __be16 *) &(head->shared_ptr[mqsp]);
 	}
 	return NULL;
 }
 
-void c2_free_mqsp(u16 * mqsp)
+void c2_free_mqsp(__be16 *mqsp)
 {
 	struct sp_chunk *head;
 	u16 idx;
@@ -129,7 +129,7 @@ void c2_free_mqsp(u16 * mqsp)
 	head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK);
 
 	/* Link head to new mqsp */
-	*mqsp = head->head;
+	*mqsp = (__force __be16) head->head;
 
 	/* Compute the shared_ptr index */
 	idx = ((unsigned long) mqsp & ~PAGE_MASK) >> 1;
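c2_free_mqsp() recovers the owning chunk by masking the element pointer with PAGE_MASK, which is valid because each sp_chunk is page-aligned and no element crosses a page boundary. A userspace sketch of that recovery, with illustrative sizes and field names rather than the driver's:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

struct chunk {
	uint16_t head;			/* free-list head index */
	uint16_t shared_ptr[64];	/* elements handed out to callers */
};

int main(void)
{
	struct chunk *c = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!c)
		return 1;
	uint16_t *elem = &c->shared_ptr[17];
	/* Mask the low bits off the element pointer to get the chunk back. */
	struct chunk *owner = (struct chunk *)((uintptr_t) elem & PAGE_MASK);
	printf("recovered the right chunk: %d\n", owner == c);	/* prints 1 */
	free(c);
	return 0;
}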
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index d2b3366786d6..bb17cce3cb59 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -422,8 +422,8 @@ void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
 		goto bail1;
 
 	reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
-
-	vq_repbuf_free(c2dev, reply);
+	if (reply)
+		vq_repbuf_free(c2dev, reply);
 bail1:
 	vq_req_free(c2dev, vq_req);
 bail0:
diff --git a/drivers/infiniband/hw/amso1100/c2_intr.c b/drivers/infiniband/hw/amso1100/c2_intr.c
index 0d0bc33ca30a..3b5095470cb3 100644
--- a/drivers/infiniband/hw/amso1100/c2_intr.c
+++ b/drivers/infiniband/hw/amso1100/c2_intr.c
@@ -174,7 +174,11 @@ static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
 		return;
 	}
 
-	err = c2_errno(reply_msg);
+	if (reply_msg)
+		err = c2_errno(reply_msg);
+	else
+		err = -ENOMEM;
+
 	if (!err) switch (req->event) {
 	case IW_CM_EVENT_ESTABLISHED:
 		c2_set_qp_state(req->qp,
diff --git a/drivers/infiniband/hw/amso1100/c2_mm.c b/drivers/infiniband/hw/amso1100/c2_mm.c
index 1e4f46493fcb..b506fe22b4d4 100644
--- a/drivers/infiniband/hw/amso1100/c2_mm.c
+++ b/drivers/infiniband/hw/amso1100/c2_mm.c
@@ -45,7 +45,7 @@
  * Reply buffer _is_ freed by this function.
  */
 static int
-send_pbl_messages(struct c2_dev *c2dev, u32 stag_index,
+send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
 		  unsigned long va, u32 pbl_depth,
 		  struct c2_vq_req *vq_req, int pbl_type)
 {
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.c b/drivers/infiniband/hw/amso1100/c2_mq.c
index b88a75592102..0cddc49beae1 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.c
+++ b/drivers/infiniband/hw/amso1100/c2_mq.c
@@ -64,7 +64,7 @@ void c2_mq_produce(struct c2_mq *q)
 		q->priv = (q->priv + 1) % q->q_size;
 		q->hint_count++;
 		/* Update peer's offset. */
-		__raw_writew(cpu_to_be16(q->priv), &q->peer->shared);
+		__raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
 	}
 }
 
@@ -105,7 +105,7 @@ void c2_mq_free(struct c2_mq *q)
 #endif
 		q->priv = (q->priv + 1) % q->q_size;
 		/* Update peer's offset. */
-		__raw_writew(cpu_to_be16(q->priv), &q->peer->shared);
+		__raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
 	}
 }
 
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h
index 9185bbb21658..acede007b94a 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.h
+++ b/drivers/infiniband/hw/amso1100/c2_mq.h
@@ -75,7 +75,7 @@ struct c2_mq {
 	u16 hint_count;
 	u16 priv;
 	struct c2_mq_shared __iomem *peer;
-	u16 *shared;
+	__be16 *shared;
 	dma_addr_t shared_dma;
 	u32 q_size;
 	u32 msg_size;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 7a6cece6ea9d..e10d27a6e145 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -67,7 +67,7 @@ static int c2_query_device(struct ib_device *ibdev,
 {
 	struct c2_dev *c2dev = to_c2dev(ibdev);
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	*props = c2dev->props;
 	return 0;
@@ -76,7 +76,7 @@ static int c2_query_device(struct ib_device *ibdev,
 static int c2_query_port(struct ib_device *ibdev,
 			 u8 port, struct ib_port_attr *props)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	props->max_mtu = IB_MTU_4096;
 	props->lid = 0;
@@ -102,14 +102,14 @@ static int c2_modify_port(struct ib_device *ibdev,
 			  u8 port, int port_modify_mask,
 			  struct ib_port_modify *props)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return 0;
 }
 
 static int c2_query_pkey(struct ib_device *ibdev,
 			 u8 port, u16 index, u16 * pkey)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	*pkey = 0;
 	return 0;
 }
@@ -119,7 +119,7 @@ static int c2_query_gid(struct ib_device *ibdev, u8 port,
 {
 	struct c2_dev *c2dev = to_c2dev(ibdev);
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
 	memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6);
 
@@ -134,7 +134,7 @@ static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
 {
 	struct c2_ucontext *context;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	context = kmalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
 		return ERR_PTR(-ENOMEM);
@@ -144,14 +144,14 @@ static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
 
 static int c2_dealloc_ucontext(struct ib_ucontext *context)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	kfree(context);
 	return 0;
 }
 
 static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return -ENOSYS;
 }
 
@@ -162,7 +162,7 @@ static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
 	struct c2_pd *pd;
 	int err;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
 	if (!pd)
@@ -187,7 +187,7 @@ static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
 
 static int c2_dealloc_pd(struct ib_pd *pd)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	c2_pd_free(to_c2dev(pd->device), to_c2pd(pd));
 	kfree(pd);
 
@@ -196,13 +196,13 @@ static int c2_dealloc_pd(struct ib_pd *pd)
 
 static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return ERR_PTR(-ENOSYS);
 }
 
 static int c2_ah_destroy(struct ib_ah *ah)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return -ENOSYS;
 }
 
@@ -230,7 +230,7 @@ struct ib_qp *c2_get_qp(struct ib_device *device, int qpn)
 
 	qp = c2_find_qpn(c2dev, qpn);
 	pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n",
-		__FUNCTION__, qp, qpn, device,
+		__func__, qp, qpn, device,
 		(qp?atomic_read(&qp->refcount):0));
 
 	return (qp?&qp->ibqp:NULL);
@@ -243,13 +243,16 @@ static struct ib_qp *c2_create_qp(struct ib_pd *pd,
 	struct c2_qp *qp;
 	int err;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
+
+	if (init_attr->create_flags)
+		return ERR_PTR(-EINVAL);
 
 	switch (init_attr->qp_type) {
 	case IB_QPT_RC:
 		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
 		if (!qp) {
-			pr_debug("%s: Unable to allocate QP\n", __FUNCTION__);
+			pr_debug("%s: Unable to allocate QP\n", __func__);
 			return ERR_PTR(-ENOMEM);
 		}
 		spin_lock_init(&qp->lock);
@@ -266,7 +269,7 @@ static struct ib_qp *c2_create_qp(struct ib_pd *pd,
 
 		break;
 	default:
-		pr_debug("%s: Invalid QP type: %d\n", __FUNCTION__,
+		pr_debug("%s: Invalid QP type: %d\n", __func__,
 			 init_attr->qp_type);
 		return ERR_PTR(-EINVAL);
 		break;
@@ -285,7 +288,7 @@ static int c2_destroy_qp(struct ib_qp *ib_qp)
 	struct c2_qp *qp = to_c2qp(ib_qp);
 
 	pr_debug("%s:%u qp=%p,qp->state=%d\n",
-		__FUNCTION__, __LINE__,ib_qp,qp->state);
+		__func__, __LINE__, ib_qp, qp->state);
 	c2_free_qp(to_c2dev(ib_qp->device), qp);
 	kfree(qp);
 	return 0;
@@ -300,13 +303,13 @@ static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, int vect
 
 	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
 	if (!cq) {
-		pr_debug("%s: Unable to allocate CQ\n", __FUNCTION__);
+		pr_debug("%s: Unable to allocate CQ\n", __func__);
 		return ERR_PTR(-ENOMEM);
 	}
 
 	err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq);
 	if (err) {
-		pr_debug("%s: error initializing CQ\n", __FUNCTION__);
+		pr_debug("%s: error initializing CQ\n", __func__);
 		kfree(cq);
 		return ERR_PTR(err);
 	}
@@ -318,7 +321,7 @@ static int c2_destroy_cq(struct ib_cq *ib_cq)
 {
 	struct c2_cq *cq = to_c2cq(ib_cq);
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	c2_free_cq(to_c2dev(ib_cq->device), cq);
 	kfree(cq);
@@ -400,7 +403,7 @@ static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
 	mr->umem = NULL;
 	pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
 		 "*iova_start %llx, first pa %llx, last pa %llx\n",
-		 __FUNCTION__, page_shift, pbl_depth, total_len,
+		 __func__, page_shift, pbl_depth, total_len,
 		 (unsigned long long) *iova_start,
 		 (unsigned long long) page_list[0],
 		 (unsigned long long) page_list[pbl_depth-1]);
@@ -422,7 +425,7 @@ static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
 	struct ib_phys_buf bl;
 	u64 kva = 0;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	/* AMSO1100 limit */
 	bl.size = 0xffffffff;
@@ -442,7 +445,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	struct c2_pd *c2pd = to_c2pd(pd);
 	struct c2_mr *c2mr;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
 	if (!c2mr)
@@ -506,7 +509,7 @@ static int c2_dereg_mr(struct ib_mr *ib_mr)
 	struct c2_mr *mr = to_c2mr(ib_mr);
 	int err;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
 	if (err)
@@ -523,14 +526,14 @@ static int c2_dereg_mr(struct ib_mr *ib_mr)
 static ssize_t show_rev(struct class_device *cdev, char *buf)
 {
 	struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev);
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return sprintf(buf, "%x\n", dev->props.hw_ver);
 }
 
 static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
 {
 	struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev);
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return sprintf(buf, "%x.%x.%x\n",
 		       (int) (dev->props.fw_ver >> 32),
 		       (int) (dev->props.fw_ver >> 16) & 0xffff,
@@ -539,13 +542,13 @@ static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
 
 static ssize_t show_hca(struct class_device *cdev, char *buf)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return sprintf(buf, "AMSO1100\n");
 }
 
 static ssize_t show_board(struct class_device *cdev, char *buf)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID");
 }
 
@@ -575,13 +578,13 @@ static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return -ENOSYS;
 }
 
 static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return -ENOSYS;
 }
 
@@ -592,13 +595,13 @@ static int c2_process_mad(struct ib_device *ibdev,
 			  struct ib_grh *in_grh,
 			  struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return -ENOSYS;
 }
 
 static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	/* Request a connection */
 	return c2_llp_connect(cm_id, iw_param);
@@ -606,7 +609,7 @@ static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 
 static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	/* Accept the new connection */
 	return c2_llp_accept(cm_id, iw_param);
@@ -616,7 +619,7 @@ static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 {
 	int err;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	err = c2_llp_reject(cm_id, pdata, pdata_len);
 	return err;
@@ -626,10 +629,10 @@ static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
 {
 	int err;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	err = c2_llp_service_create(cm_id, backlog);
 	pr_debug("%s:%u err=%d\n",
-		 __FUNCTION__, __LINE__,
+		 __func__, __LINE__,
 		 err);
 	return err;
 }
@@ -637,7 +640,7 @@ static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
 static int c2_service_destroy(struct iw_cm_id *cm_id)
 {
 	int err;
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	err = c2_llp_service_destroy(cm_id);
 
@@ -743,7 +746,7 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
 	netdev = alloc_netdev(sizeof(*netdev), name, setup);
 	if (!netdev) {
 		printk(KERN_ERR PFX "%s - etherdev alloc failed",
-			__FUNCTION__);
+			__func__);
 		return NULL;
 	}
 
@@ -780,7 +783,7 @@ int c2_register_device(struct c2_dev *dev)
 	if (ret)
 		goto out2;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
 	dev->ibdev.owner = THIS_MODULE;
 	dev->ibdev.uverbs_cmd_mask =
@@ -873,13 +876,13 @@ out1:
 out2:
 	free_netdev(dev->pseudo_netdev);
 out3:
-	pr_debug("%s:%u ret=%d\n", __FUNCTION__, __LINE__, ret);
+	pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
 	return ret;
 }
 
 void c2_unregister_device(struct c2_dev *dev)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	unregister_netdev(dev->pseudo_netdev);
 	free_netdev(dev->pseudo_netdev);
 	ib_unregister_device(&dev->ibdev);
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 01d07862ea86..a6d89440ad2c 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -121,7 +121,7 @@ void c2_set_qp_state(struct c2_qp *qp, int c2_state)
 	int new_state = to_ib_state(c2_state);
 
 	pr_debug("%s: qp[%p] state modify %s --> %s\n",
-	       __FUNCTION__,
+	       __func__,
 		qp,
 		to_ib_state_str(qp->state),
 		to_ib_state_str(new_state));
@@ -141,7 +141,7 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
 	int err;
 
 	pr_debug("%s:%d qp=%p, %s --> %s\n",
-		__FUNCTION__, __LINE__,
+		__func__, __LINE__,
 		qp,
 		to_ib_state_str(qp->state),
 		to_ib_state_str(attr->qp_state));
@@ -224,7 +224,7 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
 		qp->state = next_state;
 #ifdef DEBUG
 	else
-		pr_debug("%s: c2_errno=%d\n", __FUNCTION__, err);
+		pr_debug("%s: c2_errno=%d\n", __func__, err);
 #endif
 	/*
 	 * If we're going to error and generating the event here, then
@@ -243,7 +243,7 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
 	vq_req_free(c2dev, vq_req);
 
 	pr_debug("%s:%d qp=%p, cur_state=%s\n",
-		__FUNCTION__, __LINE__,
+		__func__, __LINE__,
 		qp,
 		to_ib_state_str(qp->state));
 	return err;
@@ -811,16 +811,24 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
 
 		switch (ib_wr->opcode) {
 		case IB_WR_SEND:
-			if (ib_wr->send_flags & IB_SEND_SOLICITED) {
-				c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
-				msg_size = sizeof(struct c2wr_send_req);
+		case IB_WR_SEND_WITH_INV:
+			if (ib_wr->opcode == IB_WR_SEND) {
+				if (ib_wr->send_flags & IB_SEND_SOLICITED)
+					c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
+				else
+					c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
+				wr.sqwr.send.remote_stag = 0;
 			} else {
-				c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
-				msg_size = sizeof(struct c2wr_send_req);
+				if (ib_wr->send_flags & IB_SEND_SOLICITED)
+					c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE_INV);
+				else
+					c2_wr_set_id(&wr, C2_WR_TYPE_SEND_INV);
+				wr.sqwr.send.remote_stag =
+					cpu_to_be32(ib_wr->ex.invalidate_rkey);
 			}
 
-			wr.sqwr.send.remote_stag = 0;
-			msg_size += sizeof(struct c2_data_addr) * ib_wr->num_sge;
+			msg_size = sizeof(struct c2wr_send_req) +
+				sizeof(struct c2_data_addr) * ib_wr->num_sge;
 			if (ib_wr->num_sge > qp->send_sgl_depth) {
 				err = -EINVAL;
 				break;
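The reworked c2_post_send() above folds IB_WR_SEND_WITH_INV into the IB_WR_SEND case, then derives one of four adapter WR types from the opcode and the solicited-event flag, attaching the rkey to invalidate only in the WITH_INV case. The selection logic reduced to a standalone sketch, with placeholder enum values standing in for the driver's C2_WR_TYPE_* constants:

#include <stdio.h>

enum wr_type { WR_SEND, WR_SEND_SE, WR_SEND_INV, WR_SEND_SE_INV };

static enum wr_type pick_wr_type(int with_invalidate, int solicited)
{
	if (!with_invalidate)
		return solicited ? WR_SEND_SE : WR_SEND;
	/* invalidating sends additionally carry the remote stag to drop */
	return solicited ? WR_SEND_SE_INV : WR_SEND_INV;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       pick_wr_type(0, 0), pick_wr_type(0, 1),
	       pick_wr_type(1, 0), pick_wr_type(1, 1));	/* 0 1 2 3 */
	return 0;
}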
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 1687c511cb2f..9a054c6941a4 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -208,7 +208,7 @@ static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
 /*
  * Add an IP address to the RNIC interface
  */
-int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
+int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
 {
 	struct c2_vq_req *vq_req;
 	struct c2wr_rnic_setconfig_req *wr;
@@ -270,7 +270,7 @@ int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
 /*
  * Delete an IP address from the RNIC interface
  */
-int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
+int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
 {
 	struct c2_vq_req *vq_req;
 	struct c2wr_rnic_setconfig_req *wr;
@@ -455,7 +455,8 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 	    IB_DEVICE_CURR_QP_STATE_MOD |
 	    IB_DEVICE_SYS_IMAGE_GUID |
 	    IB_DEVICE_ZERO_STAG |
-	    IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW);
+	    IB_DEVICE_MEM_WINDOW |
+	    IB_DEVICE_SEND_W_INV);
 
 	/* Allocate the qptr_array */
 	c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
@@ -506,17 +507,17 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 	mmio_regs = c2dev->kva;
 	/* Initialize the Verbs Request Queue */
 	c2_mq_req_init(&c2dev->req_vq, 0,
-		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_QSIZE)),
-		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
 		       mmio_regs +
-		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
 		       mmio_regs +
-		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_SHARED)),
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
 		       C2_MQ_ADAPTER_TARGET);
 
 	/* Initialize the Verbs Reply Queue */
-	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
-	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
+	qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
+	msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
 	q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
 				      &c2dev->rep_vq.host_dma, GFP_KERNEL);
 	if (!q1_pages) {
@@ -524,7 +525,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 		goto bail1;
 	}
 	pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
-	pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages,
+	pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
 		 (unsigned long long) c2dev->rep_vq.host_dma);
 	c2_mq_rep_init(&c2dev->rep_vq,
 		       1,
@@ -532,12 +533,12 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 		       msgsize,
 		       q1_pages,
 		       mmio_regs +
-		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_SHARED)),
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
 		       C2_MQ_HOST_TARGET);
 
 	/* Initialize the Asynchronus Event Queue */
-	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
-	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
+	qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
+	msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
 	q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
 				      &c2dev->aeq.host_dma, GFP_KERNEL);
 	if (!q2_pages) {
@@ -545,7 +546,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 		goto bail2;
 	}
 	pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
-	pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q2_pages,
+	pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
 		 (unsigned long long) c2dev->aeq.host_dma);
 	c2_mq_rep_init(&c2dev->aeq,
 		       2,
@@ -553,7 +554,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 		       msgsize,
 		       q2_pages,
 		       mmio_regs +
-		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_SHARED)),
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
 		       C2_MQ_HOST_TARGET);
 
 	/* Initialize the verbs request allocator */
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c
index cfdacb1ec279..9ce7819b7b2e 100644
--- a/drivers/infiniband/hw/amso1100/c2_vq.c
+++ b/drivers/infiniband/hw/amso1100/c2_vq.c
@@ -197,7 +197,7 @@ int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr)
 	 */
 	while (msg == NULL) {
 		pr_debug("%s:%d no available msg in VQ, waiting...\n",
-			 __FUNCTION__, __LINE__);
+			 __func__, __LINE__);
 		init_waitqueue_entry(&__wait, current);
 		add_wait_queue(&c2dev->req_vq_wo, &__wait);
 		spin_unlock(&c2dev->vqlock);
diff --git a/drivers/infiniband/hw/amso1100/c2_wr.h b/drivers/infiniband/hw/amso1100/c2_wr.h
index 3ec6c43bb0ef..c65fbdd6e469 100644
--- a/drivers/infiniband/hw/amso1100/c2_wr.h
+++ b/drivers/infiniband/hw/amso1100/c2_wr.h
@@ -180,8 +180,8 @@ enum c2_wr_type {
 };
 
 struct c2_netaddr {
-	u32 ip_addr;
-	u32 netmask;
+	__be32 ip_addr;
+	__be32 netmask;
 	u32 mtu;
 };
 
@@ -199,9 +199,9 @@ struct c2_route {
  * A Scatter Gather Entry.
 */
 struct c2_data_addr {
-	u32 stag;
-	u32 length;
-	u64 to;
+	__be32 stag;
+	__be32 length;
+	__be64 to;
 };
 
 /*
@@ -274,7 +274,7 @@ struct c2wr_hdr {
 	 * from the host to adapter by libccil, but we copy it anyway
 	 * to make the memcpy to the adapter better aligned.
 	 */
-	u32 wqe_count;
+	__be32 wqe_count;
 
 	/* Put these fields next so that later 32- and 64-bit
 	 * quantities are naturally aligned.
@@ -316,8 +316,8 @@ enum c2_rnic_flags {
 struct c2wr_rnic_open_req {
 	struct c2wr_hdr hdr;
 	u64 user_context;
-	u16 flags;		/* See enum c2_rnic_flags */
-	u16 port_num;
+	__be16 flags;		/* See enum c2_rnic_flags */
+	__be16 port_num;
 } __attribute__((packed));
 
 struct c2wr_rnic_open_rep {
@@ -341,30 +341,30 @@ struct c2wr_rnic_query_req {
 struct c2wr_rnic_query_rep {
 	struct c2wr_hdr hdr;
 	u64 user_context;
-	u32 vendor_id;
-	u32 part_number;
-	u32 hw_version;
-	u32 fw_ver_major;
-	u32 fw_ver_minor;
-	u32 fw_ver_patch;
+	__be32 vendor_id;
+	__be32 part_number;
+	__be32 hw_version;
+	__be32 fw_ver_major;
+	__be32 fw_ver_minor;
+	__be32 fw_ver_patch;
 	char fw_ver_build_str[WR_BUILD_STR_LEN];
-	u32 max_qps;
-	u32 max_qp_depth;
+	__be32 max_qps;
+	__be32 max_qp_depth;
 	u32 max_srq_depth;
 	u32 max_send_sgl_depth;
 	u32 max_rdma_sgl_depth;
-	u32 max_cqs;
-	u32 max_cq_depth;
+	__be32 max_cqs;
+	__be32 max_cq_depth;
 	u32 max_cq_event_handlers;
-	u32 max_mrs;
+	__be32 max_mrs;
 	u32 max_pbl_depth;
-	u32 max_pds;
-	u32 max_global_ird;
+	__be32 max_pds;
+	__be32 max_global_ird;
 	u32 max_global_ord;
-	u32 max_qp_ird;
-	u32 max_qp_ord;
+	__be32 max_qp_ird;
+	__be32 max_qp_ord;
 	u32 flags;
-	u32 max_mws;
+	__be32 max_mws;
 	u32 pbe_range_low;
 	u32 pbe_range_high;
 	u32 max_srqs;
@@ -405,7 +405,7 @@ union c2wr_rnic_getconfig {
 struct c2wr_rnic_setconfig_req {
 	struct c2wr_hdr hdr;
 	u32 rnic_handle;
-	u32 option;		/* See c2_setconfig_cmd_t */
+	__be32 option;		/* See c2_setconfig_cmd_t */
 	/* variable data and pad. See c2_netaddr and c2_route */
 	u8 data[0];
 } __attribute__((packed)) ;
@@ -441,18 +441,18 @@ union c2wr_rnic_close {
 */
 struct c2wr_cq_create_req {
 	struct c2wr_hdr hdr;
-	u64 shared_ht;
+	__be64 shared_ht;
 	u64 user_context;
-	u64 msg_pool;
+	__be64 msg_pool;
 	u32 rnic_handle;
-	u32 msg_size;
-	u32 depth;
+	__be32 msg_size;
+	__be32 depth;
 } __attribute__((packed)) ;
 
 struct c2wr_cq_create_rep {
 	struct c2wr_hdr hdr;
-	u32 mq_index;
-	u32 adapter_shared;
+	__be32 mq_index;
+	__be32 adapter_shared;
 	u32 cq_handle;
 } __attribute__((packed)) ;
 
@@ -585,40 +585,40 @@ enum c2wr_qp_flags {
 
 struct c2wr_qp_create_req {
 	struct c2wr_hdr hdr;
-	u64 shared_sq_ht;
-	u64 shared_rq_ht;
+	__be64 shared_sq_ht;
+	__be64 shared_rq_ht;
 	u64 user_context;
 	u32 rnic_handle;
 	u32 sq_cq_handle;
 	u32 rq_cq_handle;
-	u32 sq_depth;
-	u32 rq_depth;
+	__be32 sq_depth;
+	__be32 rq_depth;
 	u32 srq_handle;
 	u32 srq_limit;
-	u32 flags;		/* see enum c2wr_qp_flags */
-	u32 send_sgl_depth;
-	u32 recv_sgl_depth;
-	u32 rdma_write_sgl_depth;
-	u32 ord;
-	u32 ird;
+	__be32 flags;		/* see enum c2wr_qp_flags */
+	__be32 send_sgl_depth;
+	__be32 recv_sgl_depth;
+	__be32 rdma_write_sgl_depth;
+	__be32 ord;
+	__be32 ird;
 	u32 pd_id;
 } __attribute__((packed)) ;
 
 struct c2wr_qp_create_rep {
 	struct c2wr_hdr hdr;
-	u32 sq_depth;
-	u32 rq_depth;
+	__be32 sq_depth;
+	__be32 rq_depth;
 	u32 send_sgl_depth;
 	u32 recv_sgl_depth;
 	u32 rdma_write_sgl_depth;
 	u32 ord;
 	u32 ird;
-	u32 sq_msg_size;
-	u32 sq_mq_index;
-	u32 sq_mq_start;
-	u32 rq_msg_size;
-	u32 rq_mq_index;
-	u32 rq_mq_start;
+	__be32 sq_msg_size;
+	__be32 sq_mq_index;
+	__be32 sq_mq_start;
+	__be32 rq_msg_size;
+	__be32 rq_mq_index;
+	__be32 rq_mq_start;
 	u32 qp_handle;
 } __attribute__((packed)) ;
 
@@ -667,11 +667,11 @@ struct c2wr_qp_modify_req {
 	u32 stream_msg_length;
 	u32 rnic_handle;
 	u32 qp_handle;
-	u32 next_qp_state;
-	u32 ord;
-	u32 ird;
-	u32 sq_depth;
-	u32 rq_depth;
+	__be32 next_qp_state;
+	__be32 ord;
+	__be32 ird;
+	__be32 sq_depth;
+	__be32 rq_depth;
 	u32 llp_ep_handle;
 } __attribute__((packed)) ;
 
@@ -721,10 +721,10 @@ struct c2wr_qp_connect_req {
 	struct c2wr_hdr hdr;
 	u32 rnic_handle;
 	u32 qp_handle;
-	u32 remote_addr;
-	u16 remote_port;
+	__be32 remote_addr;
+	__be16 remote_port;
 	u16 pad;
-	u32 private_data_length;
+	__be32 private_data_length;
 	u8 private_data[0];	/* Private data in-line. */
 } __attribute__((packed)) ;
 
@@ -759,25 +759,25 @@ union c2wr_nsmr_stag_alloc {
 
 struct c2wr_nsmr_register_req {
 	struct c2wr_hdr hdr;
-	u64 va;
+	__be64 va;
 	u32 rnic_handle;
-	u16 flags;
+	__be16 flags;
 	u8 stag_key;
 	u8 pad;
 	u32 pd_id;
-	u32 pbl_depth;
-	u32 pbe_size;
-	u32 fbo;
-	u32 length;
-	u32 addrs_length;
+	__be32 pbl_depth;
+	__be32 pbe_size;
+	__be32 fbo;
+	__be32 length;
+	__be32 addrs_length;
 	/* array of paddrs (must be aligned on a 64bit boundary) */
-	u64 paddrs[0];
+	__be64 paddrs[0];
 } __attribute__((packed)) ;
 
 struct c2wr_nsmr_register_rep {
 	struct c2wr_hdr hdr;
 	u32 pbl_depth;
-	u32 stag_index;
+	__be32 stag_index;
 } __attribute__((packed)) ;
 
 union c2wr_nsmr_register {
@@ -788,11 +788,11 @@ union c2wr_nsmr_register {
 struct c2wr_nsmr_pbl_req {
 	struct c2wr_hdr hdr;
 	u32 rnic_handle;
-	u32 flags;
-	u32 stag_index;
-	u32 addrs_length;
+	__be32 flags;
+	__be32 stag_index;
+	__be32 addrs_length;
 	/* array of paddrs (must be aligned on a 64bit boundary) */
-	u64 paddrs[0];
+	__be64 paddrs[0];
 } __attribute__((packed)) ;
 
 struct c2wr_nsmr_pbl_rep {
@@ -847,7 +847,7 @@ union c2wr_mw_query {
 struct c2wr_stag_dealloc_req {
 	struct c2wr_hdr hdr;
 	u32 rnic_handle;
-	u32 stag_index;
+	__be32 stag_index;
 } __attribute__((packed)) ;
 
 struct c2wr_stag_dealloc_rep {
@@ -949,7 +949,7 @@ struct c2wr_ce {
 	u64 qp_user_context;	/* c2_user_qp_t * */
 	u32 qp_state;		/* Current QP State */
 	u32 handle;		/* QPID or EP Handle */
-	u32 bytes_rcvd;		/* valid for RECV WCs */
+	__be32 bytes_rcvd;	/* valid for RECV WCs */
 	u32 stag;
 } __attribute__((packed)) ;
 
@@ -984,8 +984,8 @@ struct c2_rq_hdr {
 */
 struct c2wr_send_req {
 	struct c2_sq_hdr sq_hdr;
-	u32 sge_len;
-	u32 remote_stag;
+	__be32 sge_len;
+	__be32 remote_stag;
 	u8 data[0];		/* SGE array */
 } __attribute__((packed));
 
@@ -996,9 +996,9 @@ union c2wr_send {
 
 struct c2wr_rdma_write_req {
 	struct c2_sq_hdr sq_hdr;
-	u64 remote_to;
-	u32 remote_stag;
-	u32 sge_len;
+	__be64 remote_to;
+	__be32 remote_stag;
+	__be32 sge_len;
 	u8 data[0];		/* SGE array */
 } __attribute__((packed));
 
@@ -1009,11 +1009,11 @@ union c2wr_rdma_write {
 
 struct c2wr_rdma_read_req {
 	struct c2_sq_hdr sq_hdr;
-	u64 local_to;
-	u64 remote_to;
-	u32 local_stag;
-	u32 remote_stag;
-	u32 length;
+	__be64 local_to;
+	__be64 remote_to;
+	__be32 local_stag;
+	__be32 remote_stag;
+	__be32 length;
 } __attribute__((packed));
 
 union c2wr_rdma_read {
@@ -1113,9 +1113,9 @@ union c2wr_recv {
 struct c2wr_ae_hdr {
 	struct c2wr_hdr hdr;
 	u64 user_context;	/* user context for this res. */
-	u32 resource_type;	/* see enum c2_resource_indicator */
-	u32 resource;		/* handle for resource */
-	u32 qp_state;		/* current QP State */
+	__be32 resource_type;	/* see enum c2_resource_indicator */
+	__be32 resource;	/* handle for resource */
+	__be32 qp_state;	/* current QP State */
 } __attribute__((packed));
 
 /*
@@ -1124,11 +1124,11 @@ struct c2wr_ae_hdr {
 */
 struct c2wr_ae_active_connect_results {
 	struct c2wr_ae_hdr ae_hdr;
-	u32 laddr;
-	u32 raddr;
-	u16 lport;
-	u16 rport;
-	u32 private_data_length;
+	__be32 laddr;
+	__be32 raddr;
+	__be16 lport;
+	__be16 rport;
+	__be32 private_data_length;
 	u8 private_data[0];	/* data is in-line in the msg. */
 } __attribute__((packed));
 
@@ -1142,11 +1142,11 @@ struct c2wr_ae_active_connect_results {
 struct c2wr_ae_connection_request {
 	struct c2wr_ae_hdr ae_hdr;
 	u32 cr_handle;		/* connreq handle (sock ptr) */
-	u32 laddr;
-	u32 raddr;
-	u16 lport;
-	u16 rport;
-	u32 private_data_length;
+	__be32 laddr;
+	__be32 raddr;
+	__be16 lport;
+	__be16 rport;
+	__be32 private_data_length;
 	u8 private_data[0];	/* data is in-line in the msg. */
 } __attribute__((packed));
 
@@ -1158,12 +1158,12 @@ union c2wr_ae {
 
 struct c2wr_init_req {
 	struct c2wr_hdr hdr;
-	u64 hint_count;
-	u64 q0_host_shared;
-	u64 q1_host_shared;
-	u64 q1_host_msg_pool;
-	u64 q2_host_shared;
-	u64 q2_host_msg_pool;
+	__be64 hint_count;
+	__be64 q0_host_shared;
+	__be64 q1_host_shared;
+	__be64 q1_host_msg_pool;
+	__be64 q2_host_shared;
+	__be64 q2_host_msg_pool;
 } __attribute__((packed));
 
 struct c2wr_init_rep {
@@ -1276,10 +1276,10 @@ struct c2wr_ep_listen_create_req {
 	struct c2wr_hdr hdr;
 	u64 user_context;	/* returned in AEs. */
 	u32 rnic_handle;
-	u32 local_addr;		/* local addr, or 0 */
-	u16 local_port;		/* 0 means "pick one" */
+	__be32 local_addr;	/* local addr, or 0 */
+	__be16 local_port;	/* 0 means "pick one" */
 	u16 pad;
-	u32 backlog;		/* tradional tcp listen bl */
+	__be32 backlog;		/* tradional tcp listen bl */
 } __attribute__((packed));
 
 struct c2wr_ep_listen_create_rep {
@@ -1340,7 +1340,7 @@ struct c2wr_cr_accept_req {
 	u32 rnic_handle;
 	u32 qp_handle;		/* QP to bind to this LLP conn */
 	u32 ep_handle;		/* LLP handle to accept */
-	u32 private_data_length;
+	__be32 private_data_length;
 	u8 private_data[0];	/* data in-line in msg. */
 } __attribute__((packed));
 
@@ -1508,7 +1508,7 @@ static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count)
 {
 	((struct c2wr_hdr *) wr)->sge_count = sge_count;
 }
-static __inline__ u32 c2_wr_get_wqe_count(void *wr)
+static __inline__ __be32 c2_wr_get_wqe_count(void *wr)
 {
 	return ((struct c2wr_hdr *) wr)->wqe_count;
 }
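The header changes above type every field the adapter parses as __be16/__be32/__be64, which documents that the host must byte-swap each value it stores into a work request. A simplified userspace analogue of filling such a wire-order structure, assuming made-up field names and using htonl() in place of the kernel's cpu_to_be32():

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

struct wire_req {		/* all fields in big-endian wire order */
	uint32_t stag;
	uint32_t length;
} __attribute__((packed));

int main(void)
{
	struct wire_req req;
	unsigned char buf[sizeof(req)];

	req.stag   = htonl(0x1234);	/* cpu_to_be32() in kernel code */
	req.length = htonl(4096);
	memcpy(buf, &req, sizeof(req));
	/* First byte on the wire is the stag's MSB on any host CPU. */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}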
diff --git a/drivers/infiniband/hw/cxgb3/cxio_dbg.c b/drivers/infiniband/hw/cxgb3/cxio_dbg.c
index 75f7b16a271d..a8d24d53f307 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_dbg.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_dbg.c
@@ -45,16 +45,16 @@ void cxio_dump_tpt(struct cxio_rdev *rdev, u32 stag)
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
 	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
+		PDBG("%s couldn't allocate memory.\n", __func__);
 		return;
 	}
 	m->mem_id = MEM_PMRX;
 	m->addr = (stag>>8) * 32 + rdev->rnic_info.tpt_base;
 	m->len = size;
-	PDBG("%s TPT addr 0x%x len %d\n", __FUNCTION__, m->addr, m->len);
+	PDBG("%s TPT addr 0x%x len %d\n", __func__, m->addr, m->len);
 	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
 	if (rc) {
-		PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
+		PDBG("%s toectl returned error %d\n", __func__, rc);
 		kfree(m);
 		return;
 	}
@@ -82,17 +82,17 @@ void cxio_dump_pbl(struct cxio_rdev *rdev, u32 pbl_addr, uint len, u8 shift)
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
 	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
+		PDBG("%s couldn't allocate memory.\n", __func__);
 		return;
 	}
 	m->mem_id = MEM_PMRX;
 	m->addr = pbl_addr;
 	m->len = size;
 	PDBG("%s PBL addr 0x%x len %d depth %d\n",
-	     __FUNCTION__, m->addr, m->len, npages);
+	     __func__, m->addr, m->len, npages);
 	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
 	if (rc) {
-		PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
+		PDBG("%s toectl returned error %d\n", __func__, rc);
 		kfree(m);
 		return;
 	}
@@ -144,16 +144,16 @@ void cxio_dump_rqt(struct cxio_rdev *rdev, u32 hwtid, int nents)
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
 	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
+		PDBG("%s couldn't allocate memory.\n", __func__);
 		return;
 	}
 	m->mem_id = MEM_PMRX;
 	m->addr = ((hwtid)<<10) + rdev->rnic_info.rqt_base;
 	m->len = size;
-	PDBG("%s RQT addr 0x%x len %d\n", __FUNCTION__, m->addr, m->len);
+	PDBG("%s RQT addr 0x%x len %d\n", __func__, m->addr, m->len);
 	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
 	if (rc) {
-		PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
+		PDBG("%s toectl returned error %d\n", __func__, rc);
 		kfree(m);
 		return;
 	}
@@ -177,16 +177,16 @@ void cxio_dump_tcb(struct cxio_rdev *rdev, u32 hwtid)
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
 	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
+		PDBG("%s couldn't allocate memory.\n", __func__);
 		return;
 	}
 	m->mem_id = MEM_CM;
 	m->addr = hwtid * size;
 	m->len = size;
-	PDBG("%s TCB %d len %d\n", __FUNCTION__, m->addr, m->len);
+	PDBG("%s TCB %d len %d\n", __func__, m->addr, m->len);
 	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
 	if (rc) {
-		PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
+		PDBG("%s toectl returned error %d\n", __func__, rc);
 		kfree(m);
 		return;
 	}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 03c5ff62889a..66eb7030aea8 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -140,7 +140,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
 	struct t3_modify_qp_wr *wqe;
 	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
 	if (!skb) {
-		PDBG("%s alloc_skb failed\n", __FUNCTION__);
+		PDBG("%s alloc_skb failed\n", __func__);
 		return -ENOMEM;
 	}
 	wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
@@ -225,7 +225,7 @@ static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
 	}
 out:
 	mutex_unlock(&uctx->lock);
-	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
+	PDBG("%s qpid 0x%x\n", __func__, qpid);
 	return qpid;
 }
 
@@ -237,7 +237,7 @@ static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
 	entry = kmalloc(sizeof *entry, GFP_KERNEL);
 	if (!entry)
 		return;
-	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
+	PDBG("%s qpid 0x%x\n", __func__, qpid);
 	entry->qpid = qpid;
 	mutex_lock(&uctx->lock);
 	list_add_tail(&entry->entry, &uctx->qpids);
@@ -300,7 +300,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
 	if (!kernel_domain)
 		wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
 					(wq->qpid << rdev_p->qpshift);
-	PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __FUNCTION__,
+	PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __func__,
 	     wq->qpid, wq->doorbell, (unsigned long long) wq->udb);
 	return 0;
 err4:
@@ -345,7 +345,7 @@ static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
 {
 	struct t3_cqe cqe;
 
-	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
+	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
 	     wq, cq, cq->sw_rptr, cq->sw_wptr);
 	memset(&cqe, 0, sizeof(cqe));
 	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
@@ -363,10 +363,10 @@ void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
 {
 	u32 ptr;
 
-	PDBG("%s wq %p cq %p\n", __FUNCTION__, wq, cq);
+	PDBG("%s wq %p cq %p\n", __func__, wq, cq);
 
 	/* flush RQ */
-	PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __FUNCTION__,
+	PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
 	     wq->rq_rptr, wq->rq_wptr, count);
 	ptr = wq->rq_rptr + count;
 	while (ptr++ != wq->rq_wptr)
@@ -378,7 +378,7 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
 {
 	struct t3_cqe cqe;
 
-	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
+	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
 	     wq, cq, cq->sw_rptr, cq->sw_wptr);
 	memset(&cqe, 0, sizeof(cqe));
 	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
@@ -415,11 +415,11 @@ void cxio_flush_hw_cq(struct t3_cq *cq)
 {
 	struct t3_cqe *cqe, *swcqe;
 
-	PDBG("%s cq %p cqid 0x%x\n", __FUNCTION__, cq, cq->cqid);
+	PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
 	cqe = cxio_next_hw_cqe(cq);
 	while (cqe) {
 		PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
-		     __FUNCTION__, cq->rptr, cq->sw_wptr);
+		     __func__, cq->rptr, cq->sw_wptr);
 		swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
 		*swcqe = *cqe;
 		swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
@@ -461,7 +461,7 @@ void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
 			(*count)++;
 		ptr++;
 	}
-	PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
+	PDBG("%s cq %p count %d\n", __func__, cq, *count);
 }
 
 void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
@@ -470,7 +470,7 @@ void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
 	u32 ptr;
 
 	*count = 0;
-	PDBG("%s count zero %d\n", __FUNCTION__, *count);
+	PDBG("%s count zero %d\n", __func__, *count);
 	ptr = cq->sw_rptr;
 	while (!Q_EMPTY(ptr, cq->sw_wptr)) {
 		cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
@@ -479,7 +479,7 @@ void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
 			(*count)++;
 		ptr++;
 	}
-	PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
+	PDBG("%s cq %p count %d\n", __func__, cq, *count);
 }
 
 static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
@@ -506,12 +506,12 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
 
 	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
 	if (!skb) {
-		PDBG("%s alloc_skb failed\n", __FUNCTION__);
+		PDBG("%s alloc_skb failed\n", __func__);
 		return -ENOMEM;
 	}
 	err = cxio_hal_init_ctrl_cq(rdev_p);
 	if (err) {
-		PDBG("%s err %d initializing ctrl_cq\n", __FUNCTION__, err);
+		PDBG("%s err %d initializing ctrl_cq\n", __func__, err);
 		goto err;
 	}
 	rdev_p->ctrl_qp.workq = dma_alloc_coherent(
@@ -521,7 +521,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
 					&(rdev_p->ctrl_qp.dma_addr),
 					GFP_KERNEL);
 	if (!rdev_p->ctrl_qp.workq) {
-		PDBG("%s dma_alloc_coherent failed\n", __FUNCTION__);
+		PDBG("%s dma_alloc_coherent failed\n", __func__);
 		err = -ENOMEM;
 		goto err;
 	}
@@ -591,25 +591,25 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
 	addr &= 0x7FFFFFF;
 	nr_wqe = len % 96 ? len / 96 + 1 : len / 96;	/* 96B max per WQE */
 	PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
-	     __FUNCTION__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
+	     __func__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
 	     nr_wqe, data, addr);
 	utx_len = 3;		/* in 32B unit */
 	for (i = 0; i < nr_wqe; i++) {
 		if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
 			   T3_CTRL_QP_SIZE_LOG2)) {
 			PDBG("%s ctrl_qp full wtpr 0x%0x rptr 0x%0x, "
-			     "wait for more space i %d\n", __FUNCTION__,
+			     "wait for more space i %d\n", __func__,
 			     rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
 			if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
 					!Q_FULL(rdev_p->ctrl_qp.rptr,
 						rdev_p->ctrl_qp.wptr,
 						T3_CTRL_QP_SIZE_LOG2))) {
 				PDBG("%s ctrl_qp workq interrupted\n",
-				     __FUNCTION__);
+				     __func__);
 				return -ERESTARTSYS;
 			}
 			PDBG("%s ctrl_qp wakeup, continue posting work request "
-			     "i %d\n", __FUNCTION__, i);
+			     "i %d\n", __func__, i);
 		}
 		wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
 				(1 << T3_CTRL_QP_SIZE_LOG2)));
@@ -630,7 +630,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
 		if ((i != 0) &&
 		    (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
 			flag = T3_COMPLETION_FLAG;
-			PDBG("%s force completion at i %d\n", __FUNCTION__, i);
+			PDBG("%s force completion at i %d\n", __func__, i);
 		}
 
 		/* build the utx mem command */
@@ -701,7 +701,7 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
 		*stag = (stag_idx << 8) | ((*stag) & 0xFF);
 	}
 	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
-	     __FUNCTION__, stag_state, type, pdid, stag_idx);
+	     __func__, stag_state, type, pdid, stag_idx);
 
 	if (reset_tpt_entry)
 		cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3);
@@ -718,7 +718,7 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
 	if (pbl) {
 
 		PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
-		     __FUNCTION__, *pbl_addr, rdev_p->rnic_info.pbl_base,
+		     __func__, *pbl_addr, rdev_p->rnic_info.pbl_base,
 		     *pbl_size);
 		err = cxio_hal_ctrl_qp_write_mem(rdev_p,
 				(*pbl_addr >> 5),
@@ -814,7 +814,7 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
 	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
 	if (!skb)
 		return -ENOMEM;
-	PDBG("%s rdev_p %p\n", __FUNCTION__, rdev_p);
+	PDBG("%s rdev_p %p\n", __func__, rdev_p);
 	wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
 	wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
 	wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
@@ -856,7 +856,7 @@ static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
 	struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
 	PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x"
 	     " se %0x notify %0x cqbranch %0x creditth %0x\n",
-	     cnt, __FUNCTION__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
+	     cnt, __func__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
 	     RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
 	     RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
 	     RSPQ_CREDIT_THRESH(rsp_msg));
@@ -868,7 +868,7 @@ static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
 	     CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
 	rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
 	if (!rdev_p) {
-		PDBG("%s called by t3cdev %p with null ulp\n", __FUNCTION__,
+		PDBG("%s called by t3cdev %p with null ulp\n", __func__,
 		     t3cdev_p);
 		return 0;
 	}
@@ -908,13 +908,13 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 		strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
 			T3_MAX_DEV_NAME_LEN);
 	} else {
-		PDBG("%s t3cdev_p or dev_name must be set\n", __FUNCTION__);
+		PDBG("%s t3cdev_p or dev_name must be set\n", __func__);
 		return -EINVAL;
 	}
 
 	list_add_tail(&rdev_p->entry, &rdev_list);
 
-	PDBG("%s opening rnic dev %s\n", __FUNCTION__, rdev_p->dev_name);
+	PDBG("%s opening rnic dev %s\n", __func__, rdev_p->dev_name);
 	memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
 	if (!rdev_p->t3cdev_p)
 		rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
@@ -923,14 +923,14 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 					 &(rdev_p->rnic_info));
 	if (err) {
 		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
-		       __FUNCTION__, rdev_p->t3cdev_p, err);
+		       __func__, rdev_p->t3cdev_p, err);
 		goto err1;
 	}
 	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
 				    &(rdev_p->port_info));
 	if (err) {
 		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
-		       __FUNCTION__, rdev_p->t3cdev_p, err);
+		       __func__, rdev_p->t3cdev_p, err);
 		goto err1;
 	}
 
@@ -947,7 +947,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 	rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
 	PDBG("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d "
 	     "pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
-	     __FUNCTION__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
+	     __func__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
 	     rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
 	     rdev_p->rnic_info.pbl_base,
 	     rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
@@ -961,7 +961,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 	err = cxio_hal_init_ctrl_qp(rdev_p);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing ctrl_qp.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err1;
 	}
 	err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
@@ -969,19 +969,19 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 				     T3_MAX_NUM_PD);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing hal resources.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err2;
 	}
 	err = cxio_hal_pblpool_create(rdev_p);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing pbl mem pool.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err3;
 	}
 	err = cxio_hal_rqtpool_create(rdev_p);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing rqt mem pool.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err4;
 	}
 	return 0;
@@ -1043,7 +1043,7 @@ static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
 			 * Insert this completed cqe into the swcq.
 			 */
 			PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
-			     __FUNCTION__, Q_PTR2IDX(ptr, wq->sq_size_log2),
+			     __func__, Q_PTR2IDX(ptr, wq->sq_size_log2),
 			     Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
 			sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
 			*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
@@ -1112,7 +1112,7 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
 
 	PDBG("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x"
 	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
-	     __FUNCTION__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
+	     __func__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
 	     CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
 	     CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
 	     CQE_WRID_LOW(*hw_cqe));
@@ -1215,7 +1215,7 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
 		struct t3_swsq *sqp;
 
 		PDBG("%s out of order completion going in swsq at idx %ld\n",
-		     __FUNCTION__,
+		     __func__,
 		     Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2));
 		sqp = wq->sq +
 		      Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
@@ -1234,13 +1234,13 @@ proc_cqe:
 	 */
 	if (SQ_TYPE(*hw_cqe)) {
 		wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
-		PDBG("%s completing sq idx %ld\n", __FUNCTION__,
+		PDBG("%s completing sq idx %ld\n", __func__,
 		     Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
 		*cookie = (wq->sq +
 			   Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2))->wr_id;
 		wq->sq_rptr++;
 	} else {
-		PDBG("%s completing rq idx %ld\n", __FUNCTION__,
+		PDBG("%s completing rq idx %ld\n", __func__,
 		     Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
 		*cookie = *(wq->rq + Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
 		wq->rq_rptr++;
@@ -1255,11 +1255,11 @@ flush_wq:
 skip_cqe:
 	if (SW_CQE(*hw_cqe)) {
 		PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
-		     __FUNCTION__, cq, cq->cqid, cq->sw_rptr);
+		     __func__, cq, cq->cqid, cq->sw_rptr);
 		++cq->sw_rptr;
 	} else {
 		PDBG("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
-		     __FUNCTION__, cq, cq->cqid, cq->rptr);
+		     __func__, cq, cq->cqid, cq->rptr);
 		++cq->rptr;
 
 		/*
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index d3095ae5bc2e..45ed4f25ef78 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -206,13 +206,13 @@ void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag)
 u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
 {
 	u32 qpid = cxio_hal_get_resource(rscp->qpid_fifo);
-	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
+	PDBG("%s qpid 0x%x\n", __func__, qpid);
 	return qpid;
 }
 
 void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
 {
-	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
+	PDBG("%s qpid 0x%x\n", __func__, qpid);
 	cxio_hal_put_resource(rscp->qpid_fifo, qpid);
 }
 
@@ -255,13 +255,13 @@ void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
 u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev_p->pbl_pool, size);
-	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, (u32)addr, size);
+	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
 	return (u32)addr;
 }
 
 void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
 {
-	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, addr, size);
+	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
 	gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
 }
 
@@ -292,13 +292,13 @@ void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
 u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev_p->rqt_pool, size << 6);
-	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, (u32)addr, size << 6);
+	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
 	return (u32)addr;
 }
 
 void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
 {
-	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, addr, size << 6);
+	PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
 	gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
 }
 
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 0315c9d9fce9..6ba4138c8ec3 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -65,7 +65,7 @@ static DEFINE_MUTEX(dev_mutex);
 
 static void rnic_init(struct iwch_dev *rnicp)
 {
-	PDBG("%s iwch_dev %p\n", __FUNCTION__, rnicp);
+	PDBG("%s iwch_dev %p\n", __func__, rnicp);
 	idr_init(&rnicp->cqidr);
 	idr_init(&rnicp->qpidr);
 	idr_init(&rnicp->mmidr);
@@ -106,7 +106,7 @@ static void open_rnic_dev(struct t3cdev *tdev)
 	struct iwch_dev *rnicp;
 	static int vers_printed;
 
-	PDBG("%s t3cdev %p\n", __FUNCTION__, tdev);
+	PDBG("%s t3cdev %p\n", __func__, tdev);
 	if (!vers_printed++)
 		printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
 		       DRV_VERSION);
@@ -144,7 +144,7 @@ static void open_rnic_dev(struct t3cdev *tdev)
 static void close_rnic_dev(struct t3cdev *tdev)
 {
 	struct iwch_dev *dev, *tmp;
-	PDBG("%s t3cdev %p\n", __FUNCTION__, tdev);
+	PDBG("%s t3cdev %p\n", __func__, tdev);
 	mutex_lock(&dev_mutex);
 	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
 		if (dev->rdev.t3cdev_p == tdev) {
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index caf4e6007a44..9ad9b1e7c8c1 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -147,7 +147,7 @@ static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
 				void *handle, u32 id)
 {
 	int ret;
-	u32 newid;
+	int newid;
 
 	do {
 		if (!idr_pre_get(idr, GFP_KERNEL)) {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 99f2f2a46bf7..72ca360c3dbc 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -110,9 +110,9 @@ static void connect_reply_upcall(struct iwch_ep *ep, int status);
 
 static void start_ep_timer(struct iwch_ep *ep)
 {
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	if (timer_pending(&ep->timer)) {
-		PDBG("%s stopped / restarted timer ep %p\n", __FUNCTION__, ep);
+		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
 		del_timer_sync(&ep->timer);
 	} else
 		get_ep(&ep->com);
@@ -124,7 +124,7 @@ static void start_ep_timer(struct iwch_ep *ep)
 
 static void stop_ep_timer(struct iwch_ep *ep)
 {
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	del_timer_sync(&ep->timer);
 	put_ep(&ep->com);
 }
@@ -190,7 +190,7 @@ int iwch_resume_tid(struct iwch_ep *ep)
 
 static void set_emss(struct iwch_ep *ep, u16 opt)
 {
-	PDBG("%s ep %p opt %u\n", __FUNCTION__, ep, opt);
+	PDBG("%s ep %p opt %u\n", __func__, ep, opt);
 	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
 	if (G_TCPOPT_TSTAMP(opt))
 		ep->emss -= 12;
@@ -220,7 +220,7 @@ static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
 	unsigned long flags;
 
 	spin_lock_irqsave(&epc->lock, flags);
-	PDBG("%s - %s -> %s\n", __FUNCTION__, states[epc->state], states[new]);
+	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
 	__state_set(epc, new);
 	spin_unlock_irqrestore(&epc->lock, flags);
 	return;
@@ -236,7 +236,7 @@ static void *alloc_ep(int size, gfp_t gfp)
 		spin_lock_init(&epc->lock);
 		init_waitqueue_head(&epc->waitq);
 	}
-	PDBG("%s alloc ep %p\n", __FUNCTION__, epc);
+	PDBG("%s alloc ep %p\n", __func__, epc);
 	return epc;
 }
 
@@ -244,13 +244,13 @@ void __free_ep(struct kref *kref)
 {
 	struct iwch_ep_common *epc;
 	epc = container_of(kref, struct iwch_ep_common, kref);
-	PDBG("%s ep %p state %s\n", __FUNCTION__, epc, states[state_read(epc)]);
+	PDBG("%s ep %p state %s\n", __func__, epc, states[state_read(epc)]);
 	kfree(epc);
 }
 
 static void release_ep_resources(struct iwch_ep *ep)
 {
-	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
+	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 	cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
 	dst_release(ep->dst);
 	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
@@ -349,7 +349,7 @@ static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
 
 static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
 {
-	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
+	PDBG("%s t3cdev %p\n", __func__, dev);
 	kfree_skb(skb);
 }
 
@@ -370,7 +370,7 @@ static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
 {
 	struct cpl_abort_req *req = cplhdr(skb);
 
-	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
+	PDBG("%s t3cdev %p\n", __func__, dev);
 	req->cmd = CPL_ABORT_NO_RST;
 	cxgb3_ofld_send(dev, skb);
 }
@@ -380,10 +380,10 @@ static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
 	struct cpl_close_con_req *req;
 	struct sk_buff *skb;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb = get_skb(NULL, sizeof(*req), gfp);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
 		return -ENOMEM;
 	}
 	skb->priority = CPL_PRIORITY_DATA;
@@ -400,11 +400,11 @@ static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
 {
 	struct cpl_abort_req *req;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb = get_skb(skb, sizeof(*req), gfp);
 	if (!skb) {
 		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
-		       __FUNCTION__);
+		       __func__);
 		return -ENOMEM;
 	}
 	skb->priority = CPL_PRIORITY_DATA;
@@ -426,12 +426,12 @@ static int send_connect(struct iwch_ep *ep)
 	unsigned int mtu_idx;
 	int wscale;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	if (!skb) {
 		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
-		       __FUNCTION__);
+		       __func__);
 		return -ENOMEM;
 	}
 	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
@@ -470,7 +470,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
 	struct mpa_message *mpa;
 	int len;
 
-	PDBG("%s ep %p pd_len %d\n", __FUNCTION__, ep, ep->plen);
+	PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);
 
 	BUG_ON(skb_cloned(skb));
 
@@ -530,13 +530,13 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
 	struct mpa_message *mpa;
 	struct sk_buff *skb;
 
-	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);
+	PDBG("%s ep %p plen %d\n", __func__, ep, plen);
 
 	mpalen = sizeof(*mpa) + plen;
 
 	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
 		return -ENOMEM;
 	}
 	skb_reserve(skb, sizeof(*req));
@@ -580,13 +580,13 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
 	int len;
 	struct sk_buff *skb;
 
-	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);
+	PDBG("%s ep %p plen %d\n", __func__, ep, plen);
 
 	mpalen = sizeof(*mpa) + plen;
 
 	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
 		return -ENOMEM;
 	}
 	skb->priority = CPL_PRIORITY_DATA;
@@ -630,7 +630,7 @@ static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct cpl_act_establish *req = cplhdr(skb);
 	unsigned int tid = GET_TID(req);
 
-	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, tid);
+	PDBG("%s ep %p tid %d\n", __func__, ep, tid);
 
 	dst_confirm(ep->dst);
 
@@ -663,7 +663,7 @@ static void close_complete_upcall(struct iwch_ep *ep)
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CLOSE;
 	if (ep->com.cm_id) {
@@ -680,7 +680,7 @@ static void peer_close_upcall(struct iwch_ep *ep)
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_DISCONNECT;
 	if (ep->com.cm_id) {
@@ -694,7 +694,7 @@ static void peer_abort_upcall(struct iwch_ep *ep)
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CLOSE;
 	event.status = -ECONNRESET;
@@ -712,7 +712,7 @@ static void connect_reply_upcall(struct iwch_ep *ep, int status)
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p status %d\n", __FUNCTION__, ep, status);
+	PDBG("%s ep %p status %d\n", __func__, ep, status);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CONNECT_REPLY;
 	event.status = status;
@@ -724,7 +724,7 @@ static void connect_reply_upcall(struct iwch_ep *ep, int status)
 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 	}
 	if (ep->com.cm_id) {
-		PDBG("%s ep %p tid %d status %d\n", __FUNCTION__, ep,
+		PDBG("%s ep %p tid %d status %d\n", __func__, ep,
 		     ep->hwtid, status);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 	}
@@ -739,7 +739,7 @@ static void connect_request_upcall(struct iwch_ep *ep)
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
+	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
 	event.local_addr = ep->com.local_addr;
@@ -759,11 +759,11 @@ static void established_upcall(struct iwch_ep *ep)
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_ESTABLISHED;
 	if (ep->com.cm_id) {
-		PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
+		PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 	}
 }
@@ -773,7 +773,7 @@ static int update_rx_credits(struct iwch_ep *ep, u32 credits)
 	struct cpl_rx_data_ack *req;
 	struct sk_buff *skb;
 
-	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
+	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	if (!skb) {
 		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
@@ -797,7 +797,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
 	enum iwch_qp_attr_mask mask;
 	int err;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 
 	/*
 	 * Stop mpa timer.  If it expired, then the state has
@@ -884,7 +884,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
 	ep->mpa_attr.version = mpa_rev;
 	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
-	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
+	     "xmit_marker_enabled=%d, version=%d\n", __func__,
 	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
 	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
 
@@ -915,7 +915,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
 	struct mpa_message *mpa;
 	u16 plen;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 
 	/*
 	 * Stop mpa timer.  If it expired, then the state has
@@ -935,7 +935,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
 		return;
 	}
 
-	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
 
 	/*
 	 * Copy the new data into our accumulation buffer.
@@ -950,7 +950,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
 	 */
 	if (ep->mpa_pkt_len < sizeof(*mpa))
 		return;
-	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
 	mpa = (struct mpa_message *) ep->mpa_pkt;
 
 	/*
@@ -1000,7 +1000,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
 	ep->mpa_attr.version = mpa_rev;
 	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
-	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
+	     "xmit_marker_enabled=%d, version=%d\n", __func__,
 	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
 	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
 
@@ -1017,7 +1017,7 @@ static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct cpl_rx_data *hdr = cplhdr(skb);
 	unsigned int dlen = ntohs(hdr->len);
 
-	PDBG("%s ep %p dlen %u\n", __FUNCTION__, ep, dlen);
+	PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);
 
 	skb_pull(skb, sizeof(*hdr));
 	skb_trim(skb, dlen);
@@ -1037,7 +1037,7 @@ static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	default:
 		printk(KERN_ERR MOD "%s Unexpected streaming data."
 		       " ep %p state %d tid %d\n",
-		       __FUNCTION__, ep, state_read(&ep->com), ep->hwtid);
+		       __func__, ep, state_read(&ep->com), ep->hwtid);
 
 		/*
 		 * The ep will timeout and inform the ULP of the failure.
@@ -1063,7 +1063,7 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct cpl_wr_ack *hdr = cplhdr(skb);
 	unsigned int credits = ntohs(hdr->credits);
 
-	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
+	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
 
 	if (credits == 0)
 		return CPL_RET_BUF_DONE;
@@ -1084,7 +1084,7 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 {
 	struct iwch_ep *ep = ctx;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 
 	/*
 	 * We get 2 abort replies from the HW.  The first one must
@@ -1115,7 +1115,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct iwch_ep *ep = ctx;
 	struct cpl_act_open_rpl *rpl = cplhdr(skb);
 
-	PDBG("%s ep %p status %u errno %d\n", __FUNCTION__, ep, rpl->status,
+	PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
 	     status2errno(rpl->status));
 	connect_reply_upcall(ep, status2errno(rpl->status));
 	state_set(&ep->com, DEAD);
@@ -1133,7 +1133,7 @@ static int listen_start(struct iwch_listen_ep *ep)
 	struct sk_buff *skb;
 	struct cpl_pass_open_req *req;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	if (!skb) {
 		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
@@ -1162,7 +1162,7 @@ static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct iwch_listen_ep *ep = ctx;
 	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
 
-	PDBG("%s ep %p status %d error %d\n", __FUNCTION__, ep,
+	PDBG("%s ep %p status %d error %d\n", __func__, ep,
 	     rpl->status, status2errno(rpl->status));
 	ep->com.rpl_err = status2errno(rpl->status);
 	ep->com.rpl_done = 1;
@@ -1176,10 +1176,10 @@ static int listen_stop(struct iwch_listen_ep *ep)
 	struct sk_buff *skb;
 	struct cpl_close_listserv_req *req;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
 		return -ENOMEM;
 	}
 	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
@@ -1197,7 +1197,7 @@ static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
 	struct iwch_listen_ep *ep = ctx;
 	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	ep->com.rpl_err = status2errno(rpl->status);
 	ep->com.rpl_done = 1;
 	wake_up(&ep->com.waitq);
@@ -1211,7 +1211,7 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
 	u32 opt0h, opt0l, opt2;
 	int wscale;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	BUG_ON(skb_cloned(skb));
 	skb_trim(skb, sizeof(*rpl));
 	skb_get(skb);
@@ -1244,7 +1244,7 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
 static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
 		      struct sk_buff *skb)
 {
-	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __FUNCTION__, tdev, hwtid,
+	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
 	     peer_ip);
 	BUG_ON(skb_cloned(skb));
 	skb_trim(skb, sizeof(struct cpl_tid_release));
@@ -1279,11 +1279,11 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct rtable *rt;
 	struct iff_mac tim;
 
-	PDBG("%s parent ep %p tid %u\n", __FUNCTION__, parent_ep, hwtid);
+	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
 
 	if (state_read(&parent_ep->com) != LISTEN) {
 		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
-		       __FUNCTION__);
+		       __func__);
 		goto reject;
 	}
 
@@ -1295,7 +1295,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
 		printk(KERN_ERR
 			"%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
-			__FUNCTION__,
+			__func__,
 			req->dst_mac[0],
 			req->dst_mac[1],
 			req->dst_mac[2],
@@ -1313,21 +1313,21 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 			  req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
 	if (!rt) {
 		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
-		       __FUNCTION__);
+		       __func__);
 		goto reject;
 	}
 	dst = &rt->u.dst;
 	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
 	if (!l2t) {
 		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
-		       __FUNCTION__);
+		       __func__);
 		dst_release(dst);
 		goto reject;
 	}
 	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
 	if (!child_ep) {
 		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
-		       __FUNCTION__);
+		       __func__);
 		l2t_release(L2DATA(tdev), l2t);
 		dst_release(dst);
 		goto reject;
@@ -1362,7 +1362,7 @@ static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct iwch_ep *ep = ctx;
 	struct cpl_pass_establish *req = cplhdr(skb);
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	ep->snd_seq = ntohl(req->snd_isn);
 	ep->rcv_seq = ntohl(req->rcv_isn);
 
@@ -1383,7 +1383,7 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	int disconnect = 1;
 	int release = 0;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	dst_confirm(ep->dst);
 
 	spin_lock_irqsave(&ep->com.lock, flags);
@@ -1473,7 +1473,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	int state;
 
 	if (is_neg_adv_abort(req->status)) {
-		PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
+		PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
 		     ep->hwtid);
 		t3_l2t_send_event(ep->com.tdev, ep->l2t);
 		return CPL_RET_BUF_DONE;
@@ -1489,7 +1489,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	}
 
 	state = state_read(&ep->com);
-	PDBG("%s ep %p state %u\n", __FUNCTION__, ep, state);
+	PDBG("%s ep %p state %u\n", __func__, ep, state);
 	switch (state) {
 	case CONNECTING:
 		break;
@@ -1528,14 +1528,14 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 			if (ret)
 				printk(KERN_ERR MOD
 				       "%s - qp <- error failed!\n",
-				       __FUNCTION__);
+				       __func__);
 		}
 		peer_abort_upcall(ep);
 		break;
 	case ABORTING:
 		break;
 	case DEAD:
-		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __FUNCTION__);
+		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
 		return CPL_RET_BUF_DONE;
 	default:
 		BUG_ON(1);
@@ -1546,7 +1546,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
 	if (!rpl_skb) {
 		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
-		       __FUNCTION__);
+		       __func__);
 		dst_release(ep->dst);
 		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
 		put_ep(&ep->com);
@@ -1573,7 +1573,7 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	unsigned long flags;
 	int release = 0;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	BUG_ON(!ep);
 
 	/* The cm_id may be null if we failed to connect */
@@ -1624,9 +1624,9 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 {
 	struct iwch_ep *ep = ctx;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
-	PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len);
+	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
 	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
 				  skb->len);
 	ep->com.qp->attr.terminate_msg_len = skb->len;
@@ -1639,13 +1639,13 @@ static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct cpl_rdma_ec_status *rep = cplhdr(skb);
 	struct iwch_ep *ep = ctx;
 
-	PDBG("%s ep %p tid %u status %d\n", __FUNCTION__, ep, ep->hwtid,
+	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
 	     rep->status);
 	if (rep->status) {
 		struct iwch_qp_attributes attrs;
 
 		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
-		       __FUNCTION__, ep->hwtid);
+		       __func__, ep->hwtid);
 		stop_ep_timer(ep);
 		attrs.next_state = IWCH_QP_STATE_ERROR;
 		iwch_modify_qp(ep->com.qp->rhp,
@@ -1663,7 +1663,7 @@ static void ep_timeout(unsigned long arg)
 	unsigned long flags;
 
 	spin_lock_irqsave(&ep->com.lock, flags);
-	PDBG("%s ep %p tid %u state %d\n", __FUNCTION__, ep, ep->hwtid,
+	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
 	     ep->com.state);
 	switch (ep->com.state) {
 	case MPA_REQ_SENT:
@@ -1693,7 +1693,7 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 {
 	int err;
 	struct iwch_ep *ep = to_ep(cm_id);
-	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
+	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
 	if (state_read(&ep->com) == DEAD) {
 		put_ep(&ep->com);
@@ -1718,7 +1718,7 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	struct iwch_dev *h = to_iwch_dev(cm_id->device);
 	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
 
1721 PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid); 1721 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1722 if (state_read(&ep->com) == DEAD) 1722 if (state_read(&ep->com) == DEAD)
1723 return -ECONNRESET; 1723 return -ECONNRESET;
1724 1724
@@ -1739,7 +1739,7 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1739 ep->com.rpl_err = 0; 1739 ep->com.rpl_err = 0;
1740 ep->ird = conn_param->ird; 1740 ep->ird = conn_param->ird;
1741 ep->ord = conn_param->ord; 1741 ep->ord = conn_param->ord;
1742 PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord); 1742 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
1743 1743
1744 get_ep(&ep->com); 1744 get_ep(&ep->com);
1745 1745
@@ -1810,7 +1810,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1810 1810
1811 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 1811 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
1812 if (!ep) { 1812 if (!ep) {
1813 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__); 1813 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
1814 err = -ENOMEM; 1814 err = -ENOMEM;
1815 goto out; 1815 goto out;
1816 } 1816 }
@@ -1827,7 +1827,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1827 ep->com.cm_id = cm_id; 1827 ep->com.cm_id = cm_id;
1828 ep->com.qp = get_qhp(h, conn_param->qpn); 1828 ep->com.qp = get_qhp(h, conn_param->qpn);
1829 BUG_ON(!ep->com.qp); 1829 BUG_ON(!ep->com.qp);
1830 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __FUNCTION__, conn_param->qpn, 1830 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
1831 ep->com.qp, cm_id); 1831 ep->com.qp, cm_id);
1832 1832
1833 /* 1833 /*
@@ -1835,7 +1835,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1835 */ 1835 */
1836 ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep); 1836 ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
1837 if (ep->atid == -1) { 1837 if (ep->atid == -1) {
1838 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__); 1838 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
1839 err = -ENOMEM; 1839 err = -ENOMEM;
1840 goto fail2; 1840 goto fail2;
1841 } 1841 }
@@ -1847,7 +1847,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1847 cm_id->local_addr.sin_port, 1847 cm_id->local_addr.sin_port,
1848 cm_id->remote_addr.sin_port, IPTOS_LOWDELAY); 1848 cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
1849 if (!rt) { 1849 if (!rt) {
1850 printk(KERN_ERR MOD "%s - cannot find route.\n", __FUNCTION__); 1850 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
1851 err = -EHOSTUNREACH; 1851 err = -EHOSTUNREACH;
1852 goto fail3; 1852 goto fail3;
1853 } 1853 }
@@ -1857,7 +1857,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1857 ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour, 1857 ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
1858 ep->dst->neighbour->dev); 1858 ep->dst->neighbour->dev);
1859 if (!ep->l2t) { 1859 if (!ep->l2t) {
1860 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __FUNCTION__); 1860 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1861 err = -ENOMEM; 1861 err = -ENOMEM;
1862 goto fail4; 1862 goto fail4;
1863 } 1863 }
@@ -1894,11 +1894,11 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
1894 1894
1895 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 1895 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
1896 if (!ep) { 1896 if (!ep) {
1897 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__); 1897 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
1898 err = -ENOMEM; 1898 err = -ENOMEM;
1899 goto fail1; 1899 goto fail1;
1900 } 1900 }
1901 PDBG("%s ep %p\n", __FUNCTION__, ep); 1901 PDBG("%s ep %p\n", __func__, ep);
1902 ep->com.tdev = h->rdev.t3cdev_p; 1902 ep->com.tdev = h->rdev.t3cdev_p;
1903 cm_id->add_ref(cm_id); 1903 cm_id->add_ref(cm_id);
1904 ep->com.cm_id = cm_id; 1904 ep->com.cm_id = cm_id;
@@ -1910,7 +1910,7 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
1910 */ 1910 */
1911 ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep); 1911 ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
1912 if (ep->stid == -1) { 1912 if (ep->stid == -1) {
1913 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__); 1913 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
1914 err = -ENOMEM; 1914 err = -ENOMEM;
1915 goto fail2; 1915 goto fail2;
1916 } 1916 }
@@ -1942,7 +1942,7 @@ int iwch_destroy_listen(struct iw_cm_id *cm_id)
1942 int err; 1942 int err;
1943 struct iwch_listen_ep *ep = to_listen_ep(cm_id); 1943 struct iwch_listen_ep *ep = to_listen_ep(cm_id);
1944 1944
1945 PDBG("%s ep %p\n", __FUNCTION__, ep); 1945 PDBG("%s ep %p\n", __func__, ep);
1946 1946
1947 might_sleep(); 1947 might_sleep();
1948 state_set(&ep->com, DEAD); 1948 state_set(&ep->com, DEAD);
@@ -1965,11 +1965,11 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
1965 1965
1966 spin_lock_irqsave(&ep->com.lock, flags); 1966 spin_lock_irqsave(&ep->com.lock, flags);
1967 1967
1968 PDBG("%s ep %p state %s, abrupt %d\n", __FUNCTION__, ep, 1968 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
1969 states[ep->com.state], abrupt); 1969 states[ep->com.state], abrupt);
1970 1970
1971 if (ep->com.state == DEAD) { 1971 if (ep->com.state == DEAD) {
1972 PDBG("%s already dead ep %p\n", __FUNCTION__, ep); 1972 PDBG("%s already dead ep %p\n", __func__, ep);
1973 goto out; 1973 goto out;
1974 } 1974 }
1975 1975
@@ -2020,7 +2020,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2020 if (ep->dst != old) 2020 if (ep->dst != old)
2021 return 0; 2021 return 0;
2022 2022
2023 PDBG("%s ep %p redirect to dst %p l2t %p\n", __FUNCTION__, ep, new, 2023 PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2024 l2t); 2024 l2t);
2025 dst_hold(new); 2025 dst_hold(new);
2026 l2t_release(L2DATA(ep->com.tdev), ep->l2t); 2026 l2t_release(L2DATA(ep->com.tdev), ep->l2t);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index 6107e7cd9b57..2bb7fbdb3ff4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -54,13 +54,13 @@
 #define MPA_FLAGS_MASK		0xE0
 
 #define put_ep(ep) { \
-	PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __FUNCTION__, __LINE__, \
+	PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
 	     ep, atomic_read(&((ep)->kref.refcount))); \
 	kref_put(&((ep)->kref), __free_ep); \
 }
 
 #define get_ep(ep) { \
-	PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __FUNCTION__, __LINE__, \
+	PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
 	     ep, atomic_read(&((ep)->kref.refcount))); \
 	kref_get(&((ep)->kref)); \
 }
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cq.c b/drivers/infiniband/hw/cxgb3/iwch_cq.c
index d7624c170ee7..4ee8ccd0a9e5 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cq.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cq.c
@@ -67,7 +67,7 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
 	ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
 			   &credit);
 	if (t3a_device(chp->rhp) && credit) {
-		PDBG("%s updating %d cq credits on id %d\n", __FUNCTION__,
+		PDBG("%s updating %d cq credits on id %d\n", __func__,
 		     credit, chp->cq.cqid);
 		cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
 	}
@@ -83,7 +83,7 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
 	wc->vendor_err = CQE_STATUS(cqe);
 
 	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
-	     "lo 0x%x cookie 0x%llx\n", __FUNCTION__,
+	     "lo 0x%x cookie 0x%llx\n", __func__,
 	     CQE_QPID(cqe), CQE_TYPE(cqe),
 	     CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
 	     CQE_WRID_LOW(cqe), (unsigned long long) cookie);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index b40676662a8a..7b67a6771720 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -52,7 +52,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 
 	if (!qhp) {
 		printk(KERN_ERR "%s unaffiliated error 0x%x qpid 0x%x\n",
-		       __FUNCTION__, CQE_STATUS(rsp_msg->cqe),
+		       __func__, CQE_STATUS(rsp_msg->cqe),
 		       CQE_QPID(rsp_msg->cqe));
 		spin_unlock(&rnicp->lock);
 		return;
@@ -61,14 +61,14 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 	if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
 	    (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
 		PDBG("%s AE received after RTS - "
-		     "qp state %d qpid 0x%x status 0x%x\n", __FUNCTION__,
+		     "qp state %d qpid 0x%x status 0x%x\n", __func__,
 		     qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
 		spin_unlock(&rnicp->lock);
 		return;
 	}
 
 	printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
-	       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
+	       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
 	       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
 	       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
 	       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
@@ -132,10 +132,10 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
 	    (CQE_STATUS(rsp_msg->cqe) == 0)) {
 		if (SQ_TYPE(rsp_msg->cqe)) {
 			PDBG("%s QPID 0x%x ep %p disconnecting\n",
-			     __FUNCTION__, qhp->wq.qpid, qhp->ep);
+			     __func__, qhp->wq.qpid, qhp->ep);
 			iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
 		} else {
-			PDBG("%s post REQ_ERR AE QPID 0x%x\n", __FUNCTION__,
+			PDBG("%s post REQ_ERR AE QPID 0x%x\n", __func__,
 			     qhp->wq.qpid);
 			post_qp_event(rnicp, chp, rsp_msg,
 				      IB_EVENT_QP_REQ_ERR, 0);
@@ -180,7 +180,7 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
 	case TPT_ERR_INVALIDATE_SHARED_MR:
 	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
 		printk(KERN_ERR "%s - CQE Err qpid 0x%x opcode %d status 0x%x "
-		       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
+		       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
 		       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
 		       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
 		       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
index b8797c66676d..58c3d61bcd14 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
@@ -62,7 +62,7 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
 	mmid = stag >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
 	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
-	PDBG("%s mmid 0x%x mhp %p\n", __FUNCTION__, mmid, mhp);
+	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
 	return 0;
 }
 
@@ -96,7 +96,7 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
 	mmid = stag >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
 	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
-	PDBG("%s mmid 0x%x mhp %p\n", __FUNCTION__, mmid, mhp);
+	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
 	return 0;
 }
 
@@ -163,7 +163,7 @@ int build_phys_page_list(struct ib_phys_buf *buffer_list,
 			      ((u64) j << *shift));
 
 	PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
-	     __FUNCTION__, (unsigned long long) *iova_start,
+	     __func__, (unsigned long long) *iova_start,
 	     (unsigned long long) mask, *shift, (unsigned long long) *total_size,
 	     *npages);
 
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index b2ea9210467f..ca7265443c05 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -101,7 +101,7 @@ static int iwch_dealloc_ucontext(struct ib_ucontext *context)
 	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
 	struct iwch_mm_entry *mm, *tmp;
 
-	PDBG("%s context %p\n", __FUNCTION__, context);
+	PDBG("%s context %p\n", __func__, context);
 	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
 		kfree(mm);
 	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
@@ -115,7 +115,7 @@ static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
 	struct iwch_ucontext *context;
 	struct iwch_dev *rhp = to_iwch_dev(ibdev);
 
-	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
+	PDBG("%s ibdev %p\n", __func__, ibdev);
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
 		return ERR_PTR(-ENOMEM);
@@ -129,7 +129,7 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq)
 {
 	struct iwch_cq *chp;
 
-	PDBG("%s ib_cq %p\n", __FUNCTION__, ib_cq);
+	PDBG("%s ib_cq %p\n", __func__, ib_cq);
 	chp = to_iwch_cq(ib_cq);
 
 	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
@@ -151,7 +151,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
 	struct iwch_create_cq_req ureq;
 	struct iwch_ucontext *ucontext = NULL;
 
-	PDBG("%s ib_dev %p entries %d\n", __FUNCTION__, ibdev, entries);
+	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
 	rhp = to_iwch_dev(ibdev);
 	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
 	if (!chp)
@@ -233,7 +233,7 @@ static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 	struct t3_cq oldcq, newcq;
 	int ret;
 
-	PDBG("%s ib_cq %p cqe %d\n", __FUNCTION__, cq, cqe);
+	PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);
 
 	/* We don't downsize... */
 	if (cqe <= cq->cqe)
@@ -281,7 +281,7 @@ static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
 	if (ret) {
 		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
-		       __FUNCTION__, ret);
+		       __func__, ret);
 	}
 
 	/* add user hooks here */
@@ -316,7 +316,7 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 		chp->cq.rptr = rptr;
 	} else
 		spin_lock_irqsave(&chp->lock, flag);
-	PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr);
+	PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
 	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
 	spin_unlock_irqrestore(&chp->lock, flag);
 	if (err < 0)
@@ -337,7 +337,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	struct iwch_ucontext *ucontext;
 	u64 addr;
 
-	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff,
+	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
 	     key, len);
 
 	if (vma->vm_start & (PAGE_SIZE-1)) {
@@ -390,7 +390,7 @@ static int iwch_deallocate_pd(struct ib_pd *pd)
 
 	php = to_iwch_pd(pd);
 	rhp = php->rhp;
-	PDBG("%s ibpd %p pdid 0x%x\n", __FUNCTION__, pd, php->pdid);
+	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
 	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
 	kfree(php);
 	return 0;
@@ -404,7 +404,7 @@ static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
 	u32 pdid;
 	struct iwch_dev *rhp;
 
-	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
+	PDBG("%s ibdev %p\n", __func__, ibdev);
 	rhp = (struct iwch_dev *) ibdev;
 	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
 	if (!pdid)
@@ -422,7 +422,7 @@ static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
 			return ERR_PTR(-EFAULT);
 		}
 	}
-	PDBG("%s pdid 0x%0x ptr 0x%p\n", __FUNCTION__, pdid, php);
+	PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
 	return &php->ibpd;
 }
 
@@ -432,7 +432,7 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr)
 	struct iwch_mr *mhp;
 	u32 mmid;
 
-	PDBG("%s ib_mr %p\n", __FUNCTION__, ib_mr);
+	PDBG("%s ib_mr %p\n", __func__, ib_mr);
 	/* There can be no memory windows */
 	if (atomic_read(&ib_mr->usecnt))
 		return -EINVAL;
@@ -447,7 +447,7 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr)
 	kfree((void *) (unsigned long) mhp->kva);
 	if (mhp->umem)
 		ib_umem_release(mhp->umem);
-	PDBG("%s mmid 0x%x ptr %p\n", __FUNCTION__, mmid, mhp);
+	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
 	kfree(mhp);
 	return 0;
 }
@@ -467,7 +467,7 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
 	struct iwch_mr *mhp;
 	int ret;
 
-	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
+	PDBG("%s ib_pd %p\n", __func__, pd);
 	php = to_iwch_pd(pd);
 	rhp = php->rhp;
 
@@ -531,7 +531,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
 	int npages;
 	int ret;
 
-	PDBG("%s ib_mr %p ib_pd %p\n", __FUNCTION__, mr, pd);
+	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
 
 	/* There can be no memory windows */
 	if (atomic_read(&mr->usecnt))
@@ -594,7 +594,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	struct iwch_mr *mhp;
 	struct iwch_reg_user_mr_resp uresp;
 
-	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
+	PDBG("%s ib_pd %p\n", __func__, pd);
 
 	php = to_iwch_pd(pd);
 	rhp = php->rhp;
@@ -649,7 +649,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (udata && !t3a_device(rhp)) {
 		uresp.pbl_addr = (mhp->attr.pbl_addr -
 				  rhp->rdev.rnic_info.pbl_base) >> 3;
-		PDBG("%s user resp pbl_addr 0x%x\n", __FUNCTION__,
+		PDBG("%s user resp pbl_addr 0x%x\n", __func__,
 		     uresp.pbl_addr);
 
 		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
@@ -673,7 +673,7 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
 	u64 kva;
 	struct ib_mr *ibmr;
 
-	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
+	PDBG("%s ib_pd %p\n", __func__, pd);
 
 	/*
 	 * T3 only supports 32 bits of size.
@@ -710,7 +710,7 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
 	mhp->attr.stag = stag;
 	mmid = (stag) >> 8;
 	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
-	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __FUNCTION__, mmid, mhp, stag);
+	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
 	return &(mhp->ibmw);
 }
 
@@ -726,7 +726,7 @@ static int iwch_dealloc_mw(struct ib_mw *mw)
 	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
 	remove_handle(rhp, &rhp->mmidr, mmid);
 	kfree(mhp);
-	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __FUNCTION__, mw, mmid, mhp);
+	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
 	return 0;
 }
 
@@ -754,7 +754,7 @@ static int iwch_destroy_qp(struct ib_qp *ib_qp)
 	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
 			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
 
-	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __FUNCTION__,
+	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
 	     ib_qp, qhp->wq.qpid, qhp);
 	kfree(qhp);
 	return 0;
@@ -773,7 +773,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
 	int wqsize, sqsize, rqsize;
 	struct iwch_ucontext *ucontext;
 
-	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
+	PDBG("%s ib_pd %p\n", __func__, pd);
 	if (attrs->qp_type != IB_QPT_RC)
 		return ERR_PTR(-EINVAL);
 	php = to_iwch_pd(pd);
@@ -805,7 +805,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
 	 */
 	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
 	wqsize = roundup_pow_of_two(rqsize + sqsize);
-	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __FUNCTION__,
+	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
 	     wqsize, sqsize, rqsize);
 	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
 	if (!qhp)
@@ -898,7 +898,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
 	init_timer(&(qhp->timer));
 	PDBG("%s sq_num_entries %d, rq_num_entries %d "
 	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d\n",
-	     __FUNCTION__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
+	     __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
 	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
 	     1 << qhp->wq.size_log2);
 	return &qhp->ibqp;
@@ -912,7 +912,7 @@ static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	enum iwch_qp_attr_mask mask = 0;
 	struct iwch_qp_attributes attrs;
 
-	PDBG("%s ib_qp %p\n", __FUNCTION__, ibqp);
+	PDBG("%s ib_qp %p\n", __func__, ibqp);
 
 	/* iwarp does not support the RTR state */
 	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
@@ -945,20 +945,20 @@ static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 void iwch_qp_add_ref(struct ib_qp *qp)
 {
-	PDBG("%s ib_qp %p\n", __FUNCTION__, qp);
+	PDBG("%s ib_qp %p\n", __func__, qp);
 	atomic_inc(&(to_iwch_qp(qp)->refcnt));
 }
 
 void iwch_qp_rem_ref(struct ib_qp *qp)
 {
-	PDBG("%s ib_qp %p\n", __FUNCTION__, qp);
+	PDBG("%s ib_qp %p\n", __func__, qp);
 	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
 		wake_up(&(to_iwch_qp(qp)->wait));
 }
 
 static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
 {
-	PDBG("%s ib_dev %p qpn 0x%x\n", __FUNCTION__, dev, qpn);
+	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
 	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
 }
 
@@ -966,7 +966,7 @@ static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
 static int iwch_query_pkey(struct ib_device *ibdev,
 			   u8 port, u16 index, u16 * pkey)
 {
-	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
+	PDBG("%s ibdev %p\n", __func__, ibdev);
 	*pkey = 0;
 	return 0;
 }
@@ -977,7 +977,7 @@ static int iwch_query_gid(struct ib_device *ibdev, u8 port,
 	struct iwch_dev *dev;
 
 	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
-	     __FUNCTION__, ibdev, port, index, gid);
+	     __func__, ibdev, port, index, gid);
 	dev = to_iwch_dev(ibdev);
 	BUG_ON(port == 0 || port > 2);
 	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
@@ -990,7 +990,7 @@ static int iwch_query_device(struct ib_device *ibdev,
 {
 
 	struct iwch_dev *dev;
-	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
+	PDBG("%s ibdev %p\n", __func__, ibdev);
 
 	dev = to_iwch_dev(ibdev);
 	memset(props, 0, sizeof *props);
@@ -1017,7 +1017,7 @@ static int iwch_query_device(struct ib_device *ibdev,
 static int iwch_query_port(struct ib_device *ibdev,
 			   u8 port, struct ib_port_attr *props)
 {
-	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
+	PDBG("%s ibdev %p\n", __func__, ibdev);
 	props->max_mtu = IB_MTU_4096;
 	props->lid = 0;
 	props->lmc = 0;
@@ -1045,7 +1045,7 @@ static ssize_t show_rev(struct class_device *cdev, char *buf)
 {
 	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
 					    ibdev.class_dev);
-	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
+	PDBG("%s class dev 0x%p\n", __func__, cdev);
 	return sprintf(buf, "%d\n", dev->rdev.t3cdev_p->type);
 }
 
@@ -1056,7 +1056,7 @@ static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
 	struct ethtool_drvinfo info;
 	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;
 
-	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
+	PDBG("%s class dev 0x%p\n", __func__, cdev);
 	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
 	rtnl_unlock();
@@ -1070,7 +1070,7 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
 	struct ethtool_drvinfo info;
 	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;
 
-	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
+	PDBG("%s class dev 0x%p\n", __func__, cdev);
 	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
 	rtnl_unlock();
@@ -1081,7 +1081,7 @@ static ssize_t show_board(struct class_device *cdev, char *buf)
 {
 	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
 					    ibdev.class_dev);
-	PDBG("%s class dev 0x%p\n", __FUNCTION__, dev);
+	PDBG("%s class dev 0x%p\n", __func__, dev);
 	return sprintf(buf, "%x.%x\n", dev->rdev.rnic_info.pdev->vendor,
 		       dev->rdev.rnic_info.pdev->device);
 }
@@ -1103,14 +1103,13 @@ int iwch_register_device(struct iwch_dev *dev)
 	int ret;
 	int i;
 
-	PDBG("%s iwch_dev %p\n", __FUNCTION__, dev);
+	PDBG("%s iwch_dev %p\n", __func__, dev);
 	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
 	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
 	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
 	dev->ibdev.owner = THIS_MODULE;
 	dev->device_cap_flags =
-		(IB_DEVICE_ZERO_STAG |
-		 IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW);
+		(IB_DEVICE_ZERO_STAG | IB_DEVICE_MEM_WINDOW);
 
 	dev->ibdev.uverbs_cmd_mask =
 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -1207,7 +1206,7 @@ void iwch_unregister_device(struct iwch_dev *dev)
 {
 	int i;
 
-	PDBG("%s iwch_dev %p\n", __FUNCTION__, dev);
+	PDBG("%s iwch_dev %p\n", __func__, dev);
 	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
 		class_device_remove_file(&dev->ibdev.class_dev,
 					 iwch_class_attributes[i]);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 48833f3f3bd0..61356f91109d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -213,7 +213,7 @@ static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
 		if (mm->key == key && mm->len == len) {
 			list_del_init(&mm->entry);
 			spin_unlock(&ucontext->mmap_lock);
-			PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
+			PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
 			     key, (unsigned long long) mm->addr, mm->len);
 			return mm;
 		}
@@ -226,7 +226,7 @@ static inline void insert_mmap(struct iwch_ucontext *ucontext,
 			       struct iwch_mm_entry *mm)
 {
 	spin_lock(&ucontext->mmap_lock);
-	PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
+	PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
 	     mm->key, (unsigned long long) mm->addr, mm->len);
 	list_add_tail(&mm->entry, &ucontext->mmaps);
 	spin_unlock(&ucontext->mmap_lock);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index ea2cdd73dd85..8891c3b0a3d5 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -72,7 +72,7 @@ static int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
 	wqe->send.reserved[2] = 0;
 	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
 		plen = 4;
-		wqe->send.sgl[0].stag = wr->imm_data;
+		wqe->send.sgl[0].stag = wr->ex.imm_data;
 		wqe->send.sgl[0].len = __constant_cpu_to_be32(0);
 		wqe->send.num_sgle = __constant_cpu_to_be32(0);
 		*flit_cnt = 5;
@@ -112,7 +112,7 @@ static int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
 
 	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
 		plen = 4;
-		wqe->write.sgl[0].stag = wr->imm_data;
+		wqe->write.sgl[0].stag = wr->ex.imm_data;
 		wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
 		wqe->write.num_sgle = __constant_cpu_to_be32(0);
 		*flit_cnt = 6;
@@ -168,30 +168,30 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
 
 		mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
 		if (!mhp) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EIO;
 		}
 		if (!mhp->attr.state) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EIO;
 		}
 		if (mhp->attr.zbva) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EIO;
 		}
 
 		if (sg_list[i].addr < mhp->attr.va_fbo) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EINVAL;
 		}
 		if (sg_list[i].addr + ((u64) sg_list[i].length) <
 		    sg_list[i].addr) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EINVAL;
 		}
 		if (sg_list[i].addr + ((u64) sg_list[i].length) >
 		    mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EINVAL;
 		}
 		offset = sg_list[i].addr - mhp->attr.va_fbo;
@@ -290,7 +290,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		qhp->wq.oldest_read = sqp;
 		break;
 	default:
-		PDBG("%s post of type=%d TBD!\n", __FUNCTION__,
+		PDBG("%s post of type=%d TBD!\n", __func__,
 		     wr->opcode);
 		err = -EINVAL;
 	}
@@ -309,7 +309,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
 			       0, t3_wr_flit_cnt);
 		PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
-		     __FUNCTION__, (unsigned long long) wr->wr_id, idx,
+		     __func__, (unsigned long long) wr->wr_id, idx,
 		     Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
 		     sqp->opcode);
 		wr = wr->next;
@@ -361,7 +361,7 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
 			       0, sizeof(struct t3_receive_wr) >> 3);
 		PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x "
-		     "wqe %p \n", __FUNCTION__, (unsigned long long) wr->wr_id,
+		     "wqe %p \n", __func__, (unsigned long long) wr->wr_id,
 		     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
 		++(qhp->wq.rq_wptr);
 		++(qhp->wq.wptr);
@@ -407,7 +407,7 @@ int iwch_bind_mw(struct ib_qp *qp,
 		return -ENOMEM;
 	}
 	idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
-	PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __FUNCTION__, idx,
+	PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __func__, idx,
 	     mw, mw_bind);
 	wqe = (union t3_wr *) (qhp->wq.queue + idx);
 
@@ -595,10 +595,10 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
 	struct terminate_message *term;
 	struct sk_buff *skb;
 
-	PDBG("%s %d\n", __FUNCTION__, __LINE__);
+	PDBG("%s %d\n", __func__, __LINE__);
 	skb = alloc_skb(40, GFP_ATOMIC);
 	if (!skb) {
-		printk(KERN_ERR "%s cannot send TERMINATE!\n", __FUNCTION__);
+		printk(KERN_ERR "%s cannot send TERMINATE!\n", __func__);
 		return -ENOMEM;
 	}
 	wqe = (union t3_wr *)skb_put(skb, 40);
@@ -629,7 +629,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
 	schp = get_chp(qhp->rhp, qhp->attr.scq);
 
-	PDBG("%s qhp %p rchp %p schp %p\n", __FUNCTION__, qhp, rchp, schp);
+	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
 	/* take a ref on the qhp since we must release the lock */
 	atomic_inc(&qhp->refcnt);
 	spin_unlock_irqrestore(&qhp->lock, *flag);
@@ -720,11 +720,11 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
 	init_attr.flags |= capable(CAP_NET_BIND_SERVICE) ? PRIV_QP : 0;
 	init_attr.irs = qhp->ep->rcv_seq;
 	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
-	     "flags 0x%x qpcaps 0x%x\n", __FUNCTION__,
+	     "flags 0x%x qpcaps 0x%x\n", __func__,
 	     init_attr.rq_addr, init_attr.rq_size,
 	     init_attr.flags, init_attr.qpcaps);
 	ret = cxio_rdma_init(&rhp->rdev, &init_attr);
-	PDBG("%s ret %d\n", __FUNCTION__, ret);
+	PDBG("%s ret %d\n", __func__, ret);
 	return ret;
 }
 
@@ -742,7 +742,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 	int free = 0;
 	struct iwch_ep *ep = NULL;
 
-	PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __FUNCTION__,
+	PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
 	     qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
 	     (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
 
@@ -899,14 +899,14 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 		break;
 	default:
 		printk(KERN_ERR "%s in a bad state %d\n",
-		       __FUNCTION__, qhp->attr.state);
+		       __func__, qhp->attr.state);
 		ret = -EINVAL;
 		goto err;
 		break;
 	}
 	goto out;
 err:
-	PDBG("%s disassociating ep %p qpid 0x%x\n", __FUNCTION__, qhp->ep,
+	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
 	     qhp->wq.qpid);
 
 	/* disassociate the LLP connection */
@@ -939,7 +939,7 @@ out:
 	if (free)
 		put_ep(&ep->com);
 
-	PDBG("%s exit state %d\n", __FUNCTION__, qhp->attr.state);
+	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
 	return ret;
 }
 
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
index 194c1c30cf63..56735ea2fc57 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -41,9 +41,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-
-#include <asm/current.h>
-
 #include "ehca_tools.h"
 #include "ehca_iverbs.h"
 #include "hcp_if.h"
@@ -170,17 +167,8 @@ int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
 {
 	struct ehca_av *av;
 	struct ehca_ud_av new_ehca_av;
-	struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
 	struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca,
 					      ib_device);
-	u32 cur_pid = current->tgid;
-
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
 
 	memset(&new_ehca_av, 0, sizeof(new_ehca_av));
 	new_ehca_av.sl = ah_attr->sl;
@@ -242,15 +230,6 @@ int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
 int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
 {
 	struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
-	struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
-	u32 cur_pid = current->tgid;
-
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
 
 	memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
 	       sizeof(ah_attr->grh.dgid));
@@ -273,16 +252,6 @@ int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
 
 int ehca_destroy_ah(struct ib_ah *ah)
 {
-	struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
-	u32 cur_pid = current->tgid;
-
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
-
 	kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
 
 	return 0;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 92cce8aacbb7..0d13fe0a260b 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -132,7 +132,6 @@ struct ehca_shca {
 struct ehca_pd {
 	struct ib_pd ib_pd;
 	struct ipz_pd fw_pd;
-	u32 ownpid;
 	/* small queue mgmt */
 	struct mutex lock;
 	struct list_head free[2];
@@ -215,7 +214,6 @@ struct ehca_cq {
 	atomic_t nr_events; /* #events seen */
 	wait_queue_head_t wait_completion;
 	spinlock_t task_lock;
-	u32 ownpid;
 	/* mmap counter for resources mapped into user space */
 	u32 mm_count_queue;
 	u32 mm_count_galpa;
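The ehca hunks above and below all serve one cleanup: the driver stops recording an owning PID (ownpid) per PD/CQ/AH and stops rejecting verbs calls whose current->tgid differs. One plausible reason such checks are unsound — a guess on the editor's part, the hunks themselves state no rationale — is that a PID captured at create time no longer matches after fork(), even though the child may legitimately use the inherited resources:

#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* User-space illustration with hypothetical names: an ownpid-style
 * check wrongly fails in a forked child that inherited the resource. */
struct resource { pid_t ownpid; };

static int check_owner(const struct resource *res)
{
	return res->ownpid == getpid();
}

int main(void)
{
	struct resource res = { .ownpid = getpid() };

	if (fork() == 0)
		_exit(check_owner(&res) ? 0 : 1);	/* child exits 1 */
	wait(NULL);
	return 0;
}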
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 0467c158d4a9..ec0cfcf3073f 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -43,8 +43,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <asm/current.h>
-
 #include "ehca_iverbs.h"
 #include "ehca_classes.h"
 #include "ehca_irq.h"
@@ -148,7 +146,6 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 	spin_lock_init(&my_cq->task_lock);
 	atomic_set(&my_cq->nr_events, 0);
 	init_waitqueue_head(&my_cq->wait_completion);
-	my_cq->ownpid = current->tgid;
 
 	cq = &my_cq->ib_cq;
 
@@ -320,7 +317,6 @@ int ehca_destroy_cq(struct ib_cq *cq)
 	struct ehca_shca *shca = container_of(device, struct ehca_shca,
 					      ib_device);
 	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
-	u32 cur_pid = current->tgid;
 	unsigned long flags;
 
 	if (cq->uobject) {
@@ -329,12 +325,6 @@ int ehca_destroy_cq(struct ib_cq *cq)
 				 "user space cq_num=%x", my_cq->cq_number);
 			return -EINVAL;
 		}
-		if (my_cq->ownpid != cur_pid) {
-			ehca_err(device, "Invalid caller pid=%x ownpid=%x "
-				 "cq_num=%x",
-				 cur_pid, my_cq->ownpid, my_cq->cq_number);
-			return -EINVAL;
-		}
 	}
 
 	/*
@@ -374,15 +364,6 @@ int ehca_destroy_cq(struct ib_cq *cq)
 
 int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 {
-	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
-	u32 cur_pid = current->tgid;
-
-	if (cq->uobject && my_cq->ownpid != cur_pid) {
-		ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_cq->ownpid);
-		return -EINVAL;
-	}
-
 	/* TODO: proper resize needs to be done */
 	ehca_err(cq->device, "not implemented yet");
 
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 5bd7b591987e..2515cbde7e65 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -43,6 +43,11 @@
43#include "ehca_iverbs.h" 43#include "ehca_iverbs.h"
44#include "hcp_if.h" 44#include "hcp_if.h"
45 45
46static unsigned int limit_uint(unsigned int value)
47{
48 return min_t(unsigned int, value, INT_MAX);
49}
50
46int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) 51int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
47{ 52{
48 int i, ret = 0; 53 int i, ret = 0;
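The limit_uint() helper introduced above simply clamps an unsigned firmware-reported value so it fits the signed int fields of struct ib_device_attr; the repeated min_t(unsigned, x, INT_MAX) expressions in the next hunk collapse into this one name. A standalone restatement in plain C:

#include <limits.h>
#include <stdio.h>

/* Equivalent clamp: values above INT_MAX are capped so they remain
 * representable in a signed int attribute field. */
static unsigned int limit_uint(unsigned int value)
{
	return value < INT_MAX ? value : INT_MAX;
}

int main(void)
{
	printf("%u\n", limit_uint(4000000000u));	/* prints 2147483647 */
	return 0;
}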
@@ -83,37 +88,40 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
83 props->vendor_id = rblock->vendor_id >> 8; 88 props->vendor_id = rblock->vendor_id >> 8;
84 props->vendor_part_id = rblock->vendor_part_id >> 16; 89 props->vendor_part_id = rblock->vendor_part_id >> 16;
85 props->hw_ver = rblock->hw_ver; 90 props->hw_ver = rblock->hw_ver;
86 props->max_qp = min_t(unsigned, rblock->max_qp, INT_MAX); 91 props->max_qp = limit_uint(rblock->max_qp);
87 props->max_qp_wr = min_t(unsigned, rblock->max_wqes_wq, INT_MAX); 92 props->max_qp_wr = limit_uint(rblock->max_wqes_wq);
88 props->max_sge = min_t(unsigned, rblock->max_sge, INT_MAX); 93 props->max_sge = limit_uint(rblock->max_sge);
89 props->max_sge_rd = min_t(unsigned, rblock->max_sge_rd, INT_MAX); 94 props->max_sge_rd = limit_uint(rblock->max_sge_rd);
90 props->max_cq = min_t(unsigned, rblock->max_cq, INT_MAX); 95 props->max_cq = limit_uint(rblock->max_cq);
91 props->max_cqe = min_t(unsigned, rblock->max_cqe, INT_MAX); 96 props->max_cqe = limit_uint(rblock->max_cqe);
92 props->max_mr = min_t(unsigned, rblock->max_mr, INT_MAX); 97 props->max_mr = limit_uint(rblock->max_mr);
93 props->max_mw = min_t(unsigned, rblock->max_mw, INT_MAX); 98 props->max_mw = limit_uint(rblock->max_mw);
94 props->max_pd = min_t(unsigned, rblock->max_pd, INT_MAX); 99 props->max_pd = limit_uint(rblock->max_pd);
95 props->max_ah = min_t(unsigned, rblock->max_ah, INT_MAX); 100 props->max_ah = limit_uint(rblock->max_ah);
96 props->max_fmr = min_t(unsigned, rblock->max_mr, INT_MAX); 101 props->max_ee = limit_uint(rblock->max_rd_ee_context);
102 props->max_rdd = limit_uint(rblock->max_rd_domain);
103 props->max_fmr = limit_uint(rblock->max_mr);
104 props->local_ca_ack_delay = limit_uint(rblock->local_ca_ack_delay);
105 props->max_qp_rd_atom = limit_uint(rblock->max_rr_qp);
106 props->max_ee_rd_atom = limit_uint(rblock->max_rr_ee_context);
107 props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
108 props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
109 props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);
97 110
98 if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) { 111 if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
99 props->max_srq = props->max_qp; 112 props->max_srq = limit_uint(props->max_qp);
100 props->max_srq_wr = props->max_qp_wr; 113 props->max_srq_wr = limit_uint(props->max_qp_wr);
101 props->max_srq_sge = 3; 114 props->max_srq_sge = 3;
102 } 115 }
103 116
104 props->max_pkeys = 16; 117 props->max_pkeys = 16;
105 props->local_ca_ack_delay 118 props->local_ca_ack_delay = limit_uint(rblock->local_ca_ack_delay);
106 = rblock->local_ca_ack_delay; 119 props->max_raw_ipv6_qp = limit_uint(rblock->max_raw_ipv6_qp);
107 props->max_raw_ipv6_qp 120 props->max_raw_ethy_qp = limit_uint(rblock->max_raw_ethy_qp);
108 = min_t(unsigned, rblock->max_raw_ipv6_qp, INT_MAX); 121 props->max_mcast_grp = limit_uint(rblock->max_mcast_grp);
109 props->max_raw_ethy_qp 122 props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
110 = min_t(unsigned, rblock->max_raw_ethy_qp, INT_MAX);
111 props->max_mcast_grp
112 = min_t(unsigned, rblock->max_mcast_grp, INT_MAX);
113 props->max_mcast_qp_attach
114 = min_t(unsigned, rblock->max_mcast_qp_attach, INT_MAX);
115 props->max_total_mcast_qp_attach 123 props->max_total_mcast_qp_attach
116 = min_t(unsigned, rblock->max_total_mcast_qp_attach, INT_MAX); 124 = limit_uint(rblock->max_total_mcast_qp_attach);
117 125
118 /* translate device capabilities */ 126 /* translate device capabilities */
119 props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID | 127 props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
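
The new limit_uint() helper factors out the min_t(unsigned, ..., INT_MAX) clamp that was previously repeated for each attribute: the firmware reports these limits as u32 values, while the ib_device_attr fields are signed ints. A small userspace sketch of why the clamp matters (the sample value is invented):

#include <stdio.h>
#include <limits.h>

static unsigned int limit_uint(unsigned int value)
{
	return value < INT_MAX ? value : INT_MAX;
}

int main(void)
{
	unsigned int fw_max_cqe = 0xFFFFFFF0u;	/* hypothetical firmware value */

	printf("raw cast: %d\n", (int)fw_max_cqe);		/* negative */
	printf("clamped:  %d\n", (int)limit_uint(fw_max_cqe));	/* INT_MAX */
	return 0;
}
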
@@ -128,6 +136,46 @@ query_device1:
128 return ret; 136 return ret;
129} 137}
130 138
139static int map_mtu(struct ehca_shca *shca, u32 fw_mtu)
140{
141 switch (fw_mtu) {
142 case 0x1:
143 return IB_MTU_256;
144 case 0x2:
145 return IB_MTU_512;
146 case 0x3:
147 return IB_MTU_1024;
148 case 0x4:
149 return IB_MTU_2048;
150 case 0x5:
151 return IB_MTU_4096;
152 default:
153 ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
154 fw_mtu);
155 return 0;
156 }
157}
158
159static int map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
160{
161 switch (vl_cap) {
162 case 0x1:
163 return 1;
164 case 0x2:
165 return 2;
166 case 0x3:
167 return 4;
168 case 0x4:
169 return 8;
170 case 0x5:
171 return 15;
172 default:
173 ehca_err(&shca->ib_device, "invalid VL capability: %x.",
174 vl_cap);
175 return 0;
176 }
177}
178
131int ehca_query_port(struct ib_device *ibdev, 179int ehca_query_port(struct ib_device *ibdev,
132 u8 port, struct ib_port_attr *props) 180 u8 port, struct ib_port_attr *props)
133{ 181{
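
map_mtu() translates the firmware's 0x1..0x5 MTU encoding into enum ib_mtu, whose members IB_MTU_256 through IB_MTU_4096 carry the values 1 through 5. The same mapping can also be written as a bounds-checked table; a standalone sketch (the enum values are restated locally rather than taken from rdma/ib_verbs.h):

#include <stdio.h>

/* Local stand-ins for the kernel's enum ib_mtu values. */
enum sketch_ib_mtu {
	SK_IB_MTU_256 = 1, SK_IB_MTU_512, SK_IB_MTU_1024,
	SK_IB_MTU_2048, SK_IB_MTU_4096,
};

static int map_mtu_table(unsigned int fw_mtu)
{
	static const int mtu_map[] = {
		0,		/* 0x0: no valid encoding */
		SK_IB_MTU_256, SK_IB_MTU_512, SK_IB_MTU_1024,
		SK_IB_MTU_2048, SK_IB_MTU_4096,
	};

	/* Unknown codes map to 0, mirroring the switch default above. */
	if (fw_mtu >= sizeof(mtu_map) / sizeof(mtu_map[0]))
		return 0;
	return mtu_map[fw_mtu];
}

int main(void)
{
	printf("fw 0x4 -> %d, fw 0x9 -> %d\n",
	       map_mtu_table(0x4), map_mtu_table(0x9));
	return 0;
}
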
@@ -152,31 +200,13 @@ int ehca_query_port(struct ib_device *ibdev,
152 200
153 memset(props, 0, sizeof(struct ib_port_attr)); 201 memset(props, 0, sizeof(struct ib_port_attr));
154 202
155 switch (rblock->max_mtu) { 203 props->active_mtu = props->max_mtu = map_mtu(shca, rblock->max_mtu);
156 case 0x1:
157 props->active_mtu = props->max_mtu = IB_MTU_256;
158 break;
159 case 0x2:
160 props->active_mtu = props->max_mtu = IB_MTU_512;
161 break;
162 case 0x3:
163 props->active_mtu = props->max_mtu = IB_MTU_1024;
164 break;
165 case 0x4:
166 props->active_mtu = props->max_mtu = IB_MTU_2048;
167 break;
168 case 0x5:
169 props->active_mtu = props->max_mtu = IB_MTU_4096;
170 break;
171 default:
172 ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
173 rblock->max_mtu);
174 break;
175 }
176
177 props->port_cap_flags = rblock->capability_mask; 204 props->port_cap_flags = rblock->capability_mask;
178 props->gid_tbl_len = rblock->gid_tbl_len; 205 props->gid_tbl_len = rblock->gid_tbl_len;
179 props->max_msg_sz = rblock->max_msg_sz; 206 if (rblock->max_msg_sz)
207 props->max_msg_sz = rblock->max_msg_sz;
208 else
209 props->max_msg_sz = 0x1 << 31;
180 props->bad_pkey_cntr = rblock->bad_pkey_cntr; 210 props->bad_pkey_cntr = rblock->bad_pkey_cntr;
181 props->qkey_viol_cntr = rblock->qkey_viol_cntr; 211 props->qkey_viol_cntr = rblock->qkey_viol_cntr;
182 props->pkey_tbl_len = rblock->pkey_tbl_len; 212 props->pkey_tbl_len = rblock->pkey_tbl_len;
@@ -186,6 +216,7 @@ int ehca_query_port(struct ib_device *ibdev,
186 props->sm_sl = rblock->sm_sl; 216 props->sm_sl = rblock->sm_sl;
187 props->subnet_timeout = rblock->subnet_timeout; 217 props->subnet_timeout = rblock->subnet_timeout;
188 props->init_type_reply = rblock->init_type_reply; 218 props->init_type_reply = rblock->init_type_reply;
219 props->max_vl_num = map_number_of_vls(shca, rblock->vl_cap);
189 220
190 if (rblock->state && rblock->phys_width) { 221 if (rblock->state && rblock->phys_width) {
191 props->phys_state = rblock->phys_pstate; 222 props->phys_state = rblock->phys_pstate;
@@ -314,7 +345,7 @@ query_gid1:
314 return ret; 345 return ret;
315} 346}
316 347
317const u32 allowed_port_caps = ( 348static const u32 allowed_port_caps = (
318 IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP | 349 IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
319 IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP | 350 IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
320 IB_PORT_VENDOR_CLASS_SUP); 351 IB_PORT_VENDOR_CLASS_SUP);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index a86ebcc79a95..65b3362cdb9b 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -57,16 +57,17 @@ MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
57MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); 57MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
58MODULE_VERSION(HCAD_VERSION); 58MODULE_VERSION(HCAD_VERSION);
59 59
60int ehca_open_aqp1 = 0; 60static int ehca_open_aqp1 = 0;
61static int ehca_hw_level = 0;
62static int ehca_poll_all_eqs = 1;
63static int ehca_mr_largepage = 1;
64
61int ehca_debug_level = 0; 65int ehca_debug_level = 0;
62int ehca_hw_level = 0;
63int ehca_nr_ports = 2; 66int ehca_nr_ports = 2;
64int ehca_use_hp_mr = 0; 67int ehca_use_hp_mr = 0;
65int ehca_port_act_time = 30; 68int ehca_port_act_time = 30;
66int ehca_poll_all_eqs = 1;
67int ehca_static_rate = -1; 69int ehca_static_rate = -1;
68int ehca_scaling_code = 0; 70int ehca_scaling_code = 0;
69int ehca_mr_largepage = 1;
70int ehca_lock_hcalls = -1; 71int ehca_lock_hcalls = -1;
71 72
72module_param_named(open_aqp1, ehca_open_aqp1, int, S_IRUGO); 73module_param_named(open_aqp1, ehca_open_aqp1, int, S_IRUGO);
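
The module options above lose external linkage because nothing outside ehca_main.c references them; a variable can be static and still be exposed through module_param_named(). A minimal sketch of the pattern as a hypothetical module (all names invented, not ehca code):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

/* Hypothetical knob: static, yet still visible as a module parameter. */
static int example_level;
module_param_named(level, example_level, int, S_IRUGO);
MODULE_PARM_DESC(level, "example read-only tunable");

static int __init example_init(void)
{
	printk(KERN_INFO "example_level = %d\n", example_level);
	return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
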
@@ -396,7 +397,7 @@ init_node_guid1:
396 return ret; 397 return ret;
397} 398}
398 399
399int ehca_init_device(struct ehca_shca *shca) 400static int ehca_init_device(struct ehca_shca *shca)
400{ 401{
401 int ret; 402 int ret;
402 403
@@ -579,8 +580,8 @@ static ssize_t ehca_store_debug_level(struct device_driver *ddp,
579 return 1; 580 return 1;
580} 581}
581 582
582DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR, 583static DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
583 ehca_show_debug_level, ehca_store_debug_level); 584 ehca_show_debug_level, ehca_store_debug_level);
584 585
585static struct attribute *ehca_drv_attrs[] = { 586static struct attribute *ehca_drv_attrs[] = {
586 &driver_attr_debug_level.attr, 587 &driver_attr_debug_level.attr,
@@ -941,7 +942,7 @@ void ehca_poll_eqs(unsigned long data)
941 spin_unlock(&shca_list_lock); 942 spin_unlock(&shca_list_lock);
942} 943}
943 944
944int __init ehca_module_init(void) 945static int __init ehca_module_init(void)
945{ 946{
946 int ret; 947 int ret;
947 948
@@ -988,7 +989,7 @@ module_init1:
988 return ret; 989 return ret;
989}; 990};
990 991
991void __exit ehca_module_exit(void) 992static void __exit ehca_module_exit(void)
992{ 993{
993 if (ehca_poll_all_eqs == 1) 994 if (ehca_poll_all_eqs == 1)
994 del_timer_sync(&poll_eqs_timer); 995 del_timer_sync(&poll_eqs_timer);
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index e239bbf54da1..f26997fc00f8 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -40,8 +40,6 @@
40 * POSSIBILITY OF SUCH DAMAGE. 40 * POSSIBILITY OF SUCH DAMAGE.
41 */ 41 */
42 42
43#include <asm/current.h>
44
45#include <rdma/ib_umem.h> 43#include <rdma/ib_umem.h>
46 44
47#include "ehca_iverbs.h" 45#include "ehca_iverbs.h"
@@ -419,7 +417,6 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
419 struct ehca_shca *shca = 417 struct ehca_shca *shca =
420 container_of(mr->device, struct ehca_shca, ib_device); 418 container_of(mr->device, struct ehca_shca, ib_device);
421 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr); 419 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
422 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
423 u64 new_size; 420 u64 new_size;
424 u64 *new_start; 421 u64 *new_start;
425 u32 new_acl; 422 u32 new_acl;
@@ -429,15 +426,6 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
429 u32 num_kpages = 0; 426 u32 num_kpages = 0;
430 u32 num_hwpages = 0; 427 u32 num_hwpages = 0;
431 struct ehca_mr_pginfo pginfo; 428 struct ehca_mr_pginfo pginfo;
432 u32 cur_pid = current->tgid;
433
434 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
435 (my_pd->ownpid != cur_pid)) {
436 ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
437 cur_pid, my_pd->ownpid);
438 ret = -EINVAL;
439 goto rereg_phys_mr_exit0;
440 }
441 429
442 if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) { 430 if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
443 /* TODO not supported, because PHYP rereg hCall needs pages */ 431 /* TODO not supported, because PHYP rereg hCall needs pages */
@@ -577,19 +565,9 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
577 struct ehca_shca *shca = 565 struct ehca_shca *shca =
578 container_of(mr->device, struct ehca_shca, ib_device); 566 container_of(mr->device, struct ehca_shca, ib_device);
579 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr); 567 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
580 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
581 u32 cur_pid = current->tgid;
582 unsigned long sl_flags; 568 unsigned long sl_flags;
583 struct ehca_mr_hipzout_parms hipzout; 569 struct ehca_mr_hipzout_parms hipzout;
584 570
585 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
586 (my_pd->ownpid != cur_pid)) {
587 ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
588 cur_pid, my_pd->ownpid);
589 ret = -EINVAL;
590 goto query_mr_exit0;
591 }
592
593 if ((e_mr->flags & EHCA_MR_FLAG_FMR)) { 571 if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
594 ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p " 572 ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
595 "e_mr->flags=%x", mr, e_mr, e_mr->flags); 573 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
@@ -634,16 +612,6 @@ int ehca_dereg_mr(struct ib_mr *mr)
634 struct ehca_shca *shca = 612 struct ehca_shca *shca =
635 container_of(mr->device, struct ehca_shca, ib_device); 613 container_of(mr->device, struct ehca_shca, ib_device);
636 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr); 614 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
637 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
638 u32 cur_pid = current->tgid;
639
640 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
641 (my_pd->ownpid != cur_pid)) {
642 ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
643 cur_pid, my_pd->ownpid);
644 ret = -EINVAL;
645 goto dereg_mr_exit0;
646 }
647 615
648 if ((e_mr->flags & EHCA_MR_FLAG_FMR)) { 616 if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
649 ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p " 617 ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
@@ -1952,9 +1920,8 @@ next_kpage:
1952 return ret; 1920 return ret;
1953} 1921}
1954 1922
1955int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo, 1923static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
1956 u32 number, 1924 u32 number, u64 *kpage)
1957 u64 *kpage)
1958{ 1925{
1959 int ret = 0; 1926 int ret = 0;
1960 struct ib_phys_buf *pbuf; 1927 struct ib_phys_buf *pbuf;
@@ -2012,9 +1979,8 @@ int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
2012 return ret; 1979 return ret;
2013} 1980}
2014 1981
2015int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo, 1982static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
2016 u32 number, 1983 u32 number, u64 *kpage)
2017 u64 *kpage)
2018{ 1984{
2019 int ret = 0; 1985 int ret = 0;
2020 u64 *fmrlist; 1986 u64 *fmrlist;
diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/infiniband/hw/ehca/ehca_pd.c
index 43bcf085fcf2..2fe554855fa5 100644
--- a/drivers/infiniband/hw/ehca/ehca_pd.c
+++ b/drivers/infiniband/hw/ehca/ehca_pd.c
@@ -38,8 +38,6 @@
38 * POSSIBILITY OF SUCH DAMAGE. 38 * POSSIBILITY OF SUCH DAMAGE.
39 */ 39 */
40 40
41#include <asm/current.h>
42
43#include "ehca_tools.h" 41#include "ehca_tools.h"
44#include "ehca_iverbs.h" 42#include "ehca_iverbs.h"
45 43
@@ -58,7 +56,6 @@ struct ib_pd *ehca_alloc_pd(struct ib_device *device,
58 return ERR_PTR(-ENOMEM); 56 return ERR_PTR(-ENOMEM);
59 } 57 }
60 58
61 pd->ownpid = current->tgid;
62 for (i = 0; i < 2; i++) { 59 for (i = 0; i < 2; i++) {
63 INIT_LIST_HEAD(&pd->free[i]); 60 INIT_LIST_HEAD(&pd->free[i]);
64 INIT_LIST_HEAD(&pd->full[i]); 61 INIT_LIST_HEAD(&pd->full[i]);
@@ -85,18 +82,10 @@ struct ib_pd *ehca_alloc_pd(struct ib_device *device,
85 82
86int ehca_dealloc_pd(struct ib_pd *pd) 83int ehca_dealloc_pd(struct ib_pd *pd)
87{ 84{
88 u32 cur_pid = current->tgid;
89 struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd); 85 struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
90 int i, leftovers = 0; 86 int i, leftovers = 0;
91 struct ipz_small_queue_page *page, *tmp; 87 struct ipz_small_queue_page *page, *tmp;
92 88
93 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
94 my_pd->ownpid != cur_pid) {
95 ehca_err(pd->device, "Invalid caller pid=%x ownpid=%x",
96 cur_pid, my_pd->ownpid);
97 return -EINVAL;
98 }
99
100 for (i = 0; i < 2; i++) { 89 for (i = 0; i < 2; i++) {
101 list_splice(&my_pd->full[i], &my_pd->free[i]); 90 list_splice(&my_pd->full[i], &my_pd->free[i]);
102 list_for_each_entry_safe(page, tmp, &my_pd->free[i], list) { 91 list_for_each_entry_safe(page, tmp, &my_pd->free[i], list) {
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 1012f15a7140..3eb14a52cbf2 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -43,9 +43,6 @@
43 * POSSIBILITY OF SUCH DAMAGE. 43 * POSSIBILITY OF SUCH DAMAGE.
44 */ 44 */
45 45
46
47#include <asm/current.h>
48
49#include "ehca_classes.h" 46#include "ehca_classes.h"
50#include "ehca_tools.h" 47#include "ehca_tools.h"
51#include "ehca_qes.h" 48#include "ehca_qes.h"
@@ -424,6 +421,9 @@ static struct ehca_qp *internal_create_qp(
424 u32 swqe_size = 0, rwqe_size = 0, ib_qp_num; 421 u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
425 unsigned long flags; 422 unsigned long flags;
426 423
424 if (init_attr->create_flags)
425 return ERR_PTR(-EINVAL);
426
427 memset(&parms, 0, sizeof(parms)); 427 memset(&parms, 0, sizeof(parms));
428 qp_type = init_attr->qp_type; 428 qp_type = init_attr->qp_type;
429 429
@@ -1526,16 +1526,6 @@ int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1526 struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca, 1526 struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
1527 ib_device); 1527 ib_device);
1528 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp); 1528 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
1529 struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
1530 ib_pd);
1531 u32 cur_pid = current->tgid;
1532
1533 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
1534 my_pd->ownpid != cur_pid) {
1535 ehca_err(ibqp->pd->device, "Invalid caller pid=%x ownpid=%x",
1536 cur_pid, my_pd->ownpid);
1537 return -EINVAL;
1538 }
1539 1529
1540 /* The if-block below caches qp_attr to be modified for GSI and SMI 1530 /* The if-block below caches qp_attr to be modified for GSI and SMI
1541 * qps during the initialization by ib_mad. When the respective port 1531 * qps during the initialization by ib_mad. When the respective port
@@ -1636,23 +1626,13 @@ int ehca_query_qp(struct ib_qp *qp,
1636 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) 1626 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1637{ 1627{
1638 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp); 1628 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
1639 struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
1640 ib_pd);
1641 struct ehca_shca *shca = container_of(qp->device, struct ehca_shca, 1629 struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
1642 ib_device); 1630 ib_device);
1643 struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle; 1631 struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
1644 struct hcp_modify_qp_control_block *qpcb; 1632 struct hcp_modify_qp_control_block *qpcb;
1645 u32 cur_pid = current->tgid;
1646 int cnt, ret = 0; 1633 int cnt, ret = 0;
1647 u64 h_ret; 1634 u64 h_ret;
1648 1635
1649 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
1650 my_pd->ownpid != cur_pid) {
1651 ehca_err(qp->device, "Invalid caller pid=%x ownpid=%x",
1652 cur_pid, my_pd->ownpid);
1653 return -EINVAL;
1654 }
1655
1656 if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) { 1636 if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
1657 ehca_err(qp->device, "Invalid attribute mask " 1637 ehca_err(qp->device, "Invalid attribute mask "
1658 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ", 1638 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
@@ -1797,8 +1777,6 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1797{ 1777{
1798 struct ehca_qp *my_qp = 1778 struct ehca_qp *my_qp =
1799 container_of(ibsrq, struct ehca_qp, ib_srq); 1779 container_of(ibsrq, struct ehca_qp, ib_srq);
1800 struct ehca_pd *my_pd =
1801 container_of(ibsrq->pd, struct ehca_pd, ib_pd);
1802 struct ehca_shca *shca = 1780 struct ehca_shca *shca =
1803 container_of(ibsrq->pd->device, struct ehca_shca, ib_device); 1781 container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
1804 struct hcp_modify_qp_control_block *mqpcb; 1782 struct hcp_modify_qp_control_block *mqpcb;
@@ -1806,14 +1784,6 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1806 u64 h_ret; 1784 u64 h_ret;
1807 int ret = 0; 1785 int ret = 0;
1808 1786
1809 u32 cur_pid = current->tgid;
1810 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
1811 my_pd->ownpid != cur_pid) {
1812 ehca_err(ibsrq->pd->device, "Invalid caller pid=%x ownpid=%x",
1813 cur_pid, my_pd->ownpid);
1814 return -EINVAL;
1815 }
1816
1817 mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL); 1787 mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1818 if (!mqpcb) { 1788 if (!mqpcb) {
1819 ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb " 1789 ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
@@ -1864,22 +1834,13 @@ modify_srq_exit0:
1864int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr) 1834int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
1865{ 1835{
1866 struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq); 1836 struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
1867 struct ehca_pd *my_pd = container_of(srq->pd, struct ehca_pd, ib_pd);
1868 struct ehca_shca *shca = container_of(srq->device, struct ehca_shca, 1837 struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
1869 ib_device); 1838 ib_device);
1870 struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle; 1839 struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
1871 struct hcp_modify_qp_control_block *qpcb; 1840 struct hcp_modify_qp_control_block *qpcb;
1872 u32 cur_pid = current->tgid;
1873 int ret = 0; 1841 int ret = 0;
1874 u64 h_ret; 1842 u64 h_ret;
1875 1843
1876 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
1877 my_pd->ownpid != cur_pid) {
1878 ehca_err(srq->device, "Invalid caller pid=%x ownpid=%x",
1879 cur_pid, my_pd->ownpid);
1880 return -EINVAL;
1881 }
1882
1883 qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL); 1844 qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1884 if (!qpcb) { 1845 if (!qpcb) {
1885 ehca_err(srq->device, "Out of memory for qpcb " 1846 ehca_err(srq->device, "Out of memory for qpcb "
@@ -1919,7 +1880,6 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
1919 struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd, 1880 struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
1920 ib_pd); 1881 ib_pd);
1921 struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1]; 1882 struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
1922 u32 cur_pid = current->tgid;
1923 u32 qp_num = my_qp->real_qp_num; 1883 u32 qp_num = my_qp->real_qp_num;
1924 int ret; 1884 int ret;
1925 u64 h_ret; 1885 u64 h_ret;
@@ -1934,11 +1894,6 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
1934 "user space qp_num=%x", qp_num); 1894 "user space qp_num=%x", qp_num);
1935 return -EINVAL; 1895 return -EINVAL;
1936 } 1896 }
1937 if (my_pd->ownpid != cur_pid) {
1938 ehca_err(dev, "Invalid caller pid=%x ownpid=%x",
1939 cur_pid, my_pd->ownpid);
1940 return -EINVAL;
1941 }
1942 } 1897 }
1943 1898
1944 if (my_qp->send_cq) { 1899 if (my_qp->send_cq) {
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 2ce8cffb8664..a20bbf466188 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -188,7 +188,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
188 if (send_wr->opcode == IB_WR_SEND_WITH_IMM || 188 if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
189 send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { 189 send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
190 /* this might not work as long as HW does not support it */ 190 /* this might not work as long as HW does not support it */
191 wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data); 191 wqe_p->immediate_data = be32_to_cpu(send_wr->ex.imm_data);
192 wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT; 192 wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
193 } 193 }
194 194
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index 4a8346a2bc9e..ec950bf8c479 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -73,37 +73,37 @@ extern int ehca_debug_level;
73 if (unlikely(ehca_debug_level)) \ 73 if (unlikely(ehca_debug_level)) \
74 dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \ 74 dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
75 "PU%04x EHCA_DBG:%s " format "\n", \ 75 "PU%04x EHCA_DBG:%s " format "\n", \
76 raw_smp_processor_id(), __FUNCTION__, \ 76 raw_smp_processor_id(), __func__, \
77 ## arg); \ 77 ## arg); \
78 } while (0) 78 } while (0)
79 79
80#define ehca_info(ib_dev, format, arg...) \ 80#define ehca_info(ib_dev, format, arg...) \
81 dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \ 81 dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
82 raw_smp_processor_id(), __FUNCTION__, ## arg) 82 raw_smp_processor_id(), __func__, ## arg)
83 83
84#define ehca_warn(ib_dev, format, arg...) \ 84#define ehca_warn(ib_dev, format, arg...) \
85 dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \ 85 dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
86 raw_smp_processor_id(), __FUNCTION__, ## arg) 86 raw_smp_processor_id(), __func__, ## arg)
87 87
88#define ehca_err(ib_dev, format, arg...) \ 88#define ehca_err(ib_dev, format, arg...) \
89 dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \ 89 dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
90 raw_smp_processor_id(), __FUNCTION__, ## arg) 90 raw_smp_processor_id(), __func__, ## arg)
91 91
92/* use this one only if no ib_dev available */ 92/* use this one only if no ib_dev available */
93#define ehca_gen_dbg(format, arg...) \ 93#define ehca_gen_dbg(format, arg...) \
94 do { \ 94 do { \
95 if (unlikely(ehca_debug_level)) \ 95 if (unlikely(ehca_debug_level)) \
96 printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \ 96 printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \
97 raw_smp_processor_id(), __FUNCTION__, ## arg); \ 97 raw_smp_processor_id(), __func__, ## arg); \
98 } while (0) 98 } while (0)
99 99
100#define ehca_gen_warn(format, arg...) \ 100#define ehca_gen_warn(format, arg...) \
101 printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \ 101 printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \
102 raw_smp_processor_id(), __FUNCTION__, ## arg) 102 raw_smp_processor_id(), __func__, ## arg)
103 103
104#define ehca_gen_err(format, arg...) \ 104#define ehca_gen_err(format, arg...) \
105 printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \ 105 printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
106 raw_smp_processor_id(), __FUNCTION__, ## arg) 106 raw_smp_processor_id(), __func__, ## arg)
107 107
108/** 108/**
109 * ehca_dmp - printk a memory block, whose length is n*8 bytes. 109 * ehca_dmp - printk a memory block, whose length is n*8 bytes.
@@ -118,7 +118,7 @@ extern int ehca_debug_level;
118 for (x = 0; x < l; x += 16) { \ 118 for (x = 0; x < l; x += 16) { \
119 printk(KERN_INFO "EHCA_DMP:%s " format \ 119 printk(KERN_INFO "EHCA_DMP:%s " format \
120 " adr=%p ofs=%04x %016lx %016lx\n", \ 120 " adr=%p ofs=%04x %016lx %016lx\n", \
121 __FUNCTION__, ##args, deb, x, \ 121 __func__, ##args, deb, x, \
122 *((u64 *)&deb[0]), *((u64 *)&deb[8])); \ 122 *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
123 deb += 16; \ 123 deb += 16; \
124 } \ 124 } \
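
The ehca_tools.h hunks replace the GCC-specific __FUNCTION__ with __func__, the C99 predefined identifier; both expand to the enclosing function's name, but only the latter is standard. A one-file sketch:

#include <stdio.h>

static void show_name(void)
{
	/* __func__ behaves like: static const char __func__[] = "show_name"; */
	printf("in %s\n", __func__);
}

int main(void)
{
	show_name();
	return 0;
}
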
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 5234d6c15c49..1b07f2beafaf 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -40,8 +40,6 @@
40 * POSSIBILITY OF SUCH DAMAGE. 40 * POSSIBILITY OF SUCH DAMAGE.
41 */ 41 */
42 42
43#include <asm/current.h>
44
45#include "ehca_classes.h" 43#include "ehca_classes.h"
46#include "ehca_iverbs.h" 44#include "ehca_iverbs.h"
47#include "ehca_mrmw.h" 45#include "ehca_mrmw.h"
@@ -253,11 +251,9 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
253 u32 idr_handle = fileoffset & 0x1FFFFFF; 251 u32 idr_handle = fileoffset & 0x1FFFFFF;
254 u32 q_type = (fileoffset >> 27) & 0x1; /* CQ, QP,... */ 252 u32 q_type = (fileoffset >> 27) & 0x1; /* CQ, QP,... */
255 u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */ 253 u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
256 u32 cur_pid = current->tgid;
257 u32 ret; 254 u32 ret;
258 struct ehca_cq *cq; 255 struct ehca_cq *cq;
259 struct ehca_qp *qp; 256 struct ehca_qp *qp;
260 struct ehca_pd *pd;
261 struct ib_uobject *uobject; 257 struct ib_uobject *uobject;
262 258
263 switch (q_type) { 259 switch (q_type) {
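
ehca_mmap() recovers the object handle and type from the mmap file offset with the shifts and masks shown above: a 25-bit idr handle, a 2-bit resource type, and a 1-bit queue type. A standalone sketch of the decoding (the sample offset is invented):

#include <stdio.h>

int main(void)
{
	unsigned long long fileoffset = 0x0A000123ULL;	/* hypothetical offset */
	unsigned int idr_handle = fileoffset & 0x1FFFFFF;
	unsigned int q_type = (fileoffset >> 27) & 0x1;	/* CQ, QP,... */
	unsigned int rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */

	printf("handle=%u q_type=%u rsrc=%u\n", idr_handle, q_type, rsrc_type);
	return 0;
}
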
@@ -270,13 +266,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
270 if (!cq) 266 if (!cq)
271 return -EINVAL; 267 return -EINVAL;
272 268
273 if (cq->ownpid != cur_pid) {
274 ehca_err(cq->ib_cq.device,
275 "Invalid caller pid=%x ownpid=%x",
276 cur_pid, cq->ownpid);
277 return -ENOMEM;
278 }
279
280 if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context) 269 if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
281 return -EINVAL; 270 return -EINVAL;
282 271
@@ -298,14 +287,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
298 if (!qp) 287 if (!qp)
299 return -EINVAL; 288 return -EINVAL;
300 289
301 pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
302 if (pd->ownpid != cur_pid) {
303 ehca_err(qp->ib_qp.device,
304 "Invalid caller pid=%x ownpid=%x",
305 cur_pid, pd->ownpid);
306 return -ENOMEM;
307 }
308
309 uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject; 290 uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
310 if (!uobject || uobject->context != context) 291 if (!uobject || uobject->context != context)
311 return -EINVAL; 292 return -EINVAL;
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index fe6738826865..75a6c91944c4 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -20,17 +20,20 @@ ib_ipath-y := \
20 ipath_qp.o \ 20 ipath_qp.o \
21 ipath_rc.o \ 21 ipath_rc.o \
22 ipath_ruc.o \ 22 ipath_ruc.o \
23 ipath_sdma.o \
23 ipath_srq.o \ 24 ipath_srq.o \
24 ipath_stats.o \ 25 ipath_stats.o \
25 ipath_sysfs.o \ 26 ipath_sysfs.o \
26 ipath_uc.o \ 27 ipath_uc.o \
27 ipath_ud.o \ 28 ipath_ud.o \
28 ipath_user_pages.o \ 29 ipath_user_pages.o \
30 ipath_user_sdma.o \
29 ipath_verbs_mcast.o \ 31 ipath_verbs_mcast.o \
30 ipath_verbs.o 32 ipath_verbs.o
31 33
32ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o 34ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
33ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o 35ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o
36ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba7220.o ipath_sd7220.o ipath_sd7220_img.o
34 37
35ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o 38ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
36ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o 39ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/infiniband/hw/ipath/ipath_7220.h b/drivers/infiniband/hw/ipath/ipath_7220.h
new file mode 100644
index 000000000000..74fa5cc5131d
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_7220.h
@@ -0,0 +1,57 @@
1#ifndef _IPATH_7220_H
2#define _IPATH_7220_H
3/*
4 * Copyright (c) 2007 QLogic Corporation. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35/*
36 * This header file provides the declarations and common definitions
37 * for (mostly) manipulation of the SerDes blocks within the IBA7220.
38 * the functions declared should only be called from within other
39 * 7220-related files such as ipath_iba7220.c or ipath_sd7220.c.
40 */
41int ipath_sd7220_presets(struct ipath_devdata *dd);
42int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset);
43int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, u8 *img,
44 int len, int offset);
45int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, const u8 *img,
46 int len, int offset);
47/*
48 * The values below are used for the sdnum parameter, selecting one of
49 * the two SerDes sections used for PCIe, or the single SerDes used
50 * for IB, which is currently the only one in use
51 */
52#define IB_7220_SERDES 2
53
54int ipath_sd7220_ib_load(struct ipath_devdata *dd);
55int ipath_sd7220_ib_vfy(struct ipath_devdata *dd);
56
57#endif /* _IPATH_7220_H */
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
index 591901aab6b7..28cfe97cf1e9 100644
--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -80,6 +80,8 @@
80#define IPATH_IB_LINKDOWN_DISABLE 5 80#define IPATH_IB_LINKDOWN_DISABLE 5
81#define IPATH_IB_LINK_LOOPBACK 6 /* enable local loopback */ 81#define IPATH_IB_LINK_LOOPBACK 6 /* enable local loopback */
82#define IPATH_IB_LINK_EXTERNAL 7 /* normal, disable local loopback */ 82#define IPATH_IB_LINK_EXTERNAL 7 /* normal, disable local loopback */
83#define IPATH_IB_LINK_NO_HRTBT 8 /* disable Heartbeat, e.g. for loopback */
84#define IPATH_IB_LINK_HRTBT 9 /* enable heartbeat, normal, non-loopback */
83 85
84/* 86/*
85 * These 3 values (SDR and DDR may be ORed for auto-speed 87 * These 3 values (SDR and DDR may be ORed for auto-speed
@@ -198,7 +200,8 @@ typedef enum _ipath_ureg {
198#define IPATH_RUNTIME_FORCE_WC_ORDER 0x4 200#define IPATH_RUNTIME_FORCE_WC_ORDER 0x4
199#define IPATH_RUNTIME_RCVHDR_COPY 0x8 201#define IPATH_RUNTIME_RCVHDR_COPY 0x8
200#define IPATH_RUNTIME_MASTER 0x10 202#define IPATH_RUNTIME_MASTER 0x10
201/* 0x20 and 0x40 are no longer used, but are reserved for ABI compatibility */ 203#define IPATH_RUNTIME_NODMA_RTAIL 0x80
204#define IPATH_RUNTIME_SDMA 0x200
202#define IPATH_RUNTIME_FORCE_PIOAVAIL 0x400 205#define IPATH_RUNTIME_FORCE_PIOAVAIL 0x400
203#define IPATH_RUNTIME_PIO_REGSWAPPED 0x800 206#define IPATH_RUNTIME_PIO_REGSWAPPED 0x800
204 207
@@ -444,8 +447,9 @@ struct ipath_user_info {
444#define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */ 447#define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
445#define IPATH_CMD_POLL_TYPE 28 /* set the kind of polling we want */ 448#define IPATH_CMD_POLL_TYPE 28 /* set the kind of polling we want */
446#define IPATH_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */ 449#define IPATH_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */
447 450/* 30 is unused */
448#define IPATH_CMD_MAX 29 451#define IPATH_CMD_SDMA_INFLIGHT 31 /* sdma inflight counter request */
452#define IPATH_CMD_SDMA_COMPLETE 32 /* sdma completion counter request */
449 453
450/* 454/*
451 * Poll types 455 * Poll types
@@ -483,6 +487,17 @@ struct ipath_cmd {
483 union { 487 union {
484 struct ipath_tid_info tid_info; 488 struct ipath_tid_info tid_info;
485 struct ipath_user_info user_info; 489 struct ipath_user_info user_info;
490
491 /*
492 * address in userspace where we should put the sdma
493 * inflight counter
494 */
495 __u64 sdma_inflight;
496 /*
497 * address in userspace where we should put the sdma
498 * completion counter
499 */
500 __u64 sdma_complete;
486 /* address in userspace of struct ipath_port_info to 501 /* address in userspace of struct ipath_port_info to
487 write result to */ 502 write result to */
488 __u64 port_info; 503 __u64 port_info;
@@ -537,7 +552,7 @@ struct ipath_diag_pkt {
537 552
538/* The second diag_pkt struct is the expanded version that allows 553/* The second diag_pkt struct is the expanded version that allows
539 * more control over the packet, specifically, by allowing a custom 554 * more control over the packet, specifically, by allowing a custom
540 * pbc (+ extra) qword, so that special modes and deliberate 555 * pbc (+ static rate) qword, so that special modes and deliberate
541 * changes to CRCs can be used. The elements were also re-ordered 556 * changes to CRCs can be used. The elements were also re-ordered
542 * for better alignment and to avoid padding issues. 557 * for better alignment and to avoid padding issues.
543 */ 558 */
@@ -662,8 +677,12 @@ struct infinipath_counters {
662#define INFINIPATH_RHF_LENGTH_SHIFT 0 677#define INFINIPATH_RHF_LENGTH_SHIFT 0
663#define INFINIPATH_RHF_RCVTYPE_MASK 0x7 678#define INFINIPATH_RHF_RCVTYPE_MASK 0x7
664#define INFINIPATH_RHF_RCVTYPE_SHIFT 11 679#define INFINIPATH_RHF_RCVTYPE_SHIFT 11
665#define INFINIPATH_RHF_EGRINDEX_MASK 0x7FF 680#define INFINIPATH_RHF_EGRINDEX_MASK 0xFFF
666#define INFINIPATH_RHF_EGRINDEX_SHIFT 16 681#define INFINIPATH_RHF_EGRINDEX_SHIFT 16
682#define INFINIPATH_RHF_SEQ_MASK 0xF
683#define INFINIPATH_RHF_SEQ_SHIFT 0
684#define INFINIPATH_RHF_HDRQ_OFFSET_MASK 0x7FF
685#define INFINIPATH_RHF_HDRQ_OFFSET_SHIFT 4
667#define INFINIPATH_RHF_H_ICRCERR 0x80000000 686#define INFINIPATH_RHF_H_ICRCERR 0x80000000
668#define INFINIPATH_RHF_H_VCRCERR 0x40000000 687#define INFINIPATH_RHF_H_VCRCERR 0x40000000
669#define INFINIPATH_RHF_H_PARITYERR 0x20000000 688#define INFINIPATH_RHF_H_PARITYERR 0x20000000
@@ -673,6 +692,8 @@ struct infinipath_counters {
673#define INFINIPATH_RHF_H_TIDERR 0x02000000 692#define INFINIPATH_RHF_H_TIDERR 0x02000000
674#define INFINIPATH_RHF_H_MKERR 0x01000000 693#define INFINIPATH_RHF_H_MKERR 0x01000000
675#define INFINIPATH_RHF_H_IBERR 0x00800000 694#define INFINIPATH_RHF_H_IBERR 0x00800000
695#define INFINIPATH_RHF_H_ERR_MASK 0xFF800000
696#define INFINIPATH_RHF_L_USE_EGR 0x80000000
676#define INFINIPATH_RHF_L_SWA 0x00008000 697#define INFINIPATH_RHF_L_SWA 0x00008000
677#define INFINIPATH_RHF_L_SWB 0x00004000 698#define INFINIPATH_RHF_L_SWB 0x00004000
678 699
@@ -696,6 +717,7 @@ struct infinipath_counters {
696/* SendPIO per-buffer control */ 717/* SendPIO per-buffer control */
697#define INFINIPATH_SP_TEST 0x40 718#define INFINIPATH_SP_TEST 0x40
698#define INFINIPATH_SP_TESTEBP 0x20 719#define INFINIPATH_SP_TESTEBP 0x20
720#define INFINIPATH_SP_TRIGGER_SHIFT 15
699 721
700/* SendPIOAvail bits */ 722/* SendPIOAvail bits */
701#define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1 723#define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1
@@ -762,6 +784,7 @@ struct ether_header {
762#define IPATH_MSN_MASK 0xFFFFFF 784#define IPATH_MSN_MASK 0xFFFFFF
763#define IPATH_QPN_MASK 0xFFFFFF 785#define IPATH_QPN_MASK 0xFFFFFF
764#define IPATH_MULTICAST_LID_BASE 0xC000 786#define IPATH_MULTICAST_LID_BASE 0xC000
787#define IPATH_EAGER_TID_ID INFINIPATH_I_TID_MASK
765#define IPATH_MULTICAST_QPN 0xFFFFFF 788#define IPATH_MULTICAST_QPN 0xFFFFFF
766 789
767/* Receive Header Queue: receive type (from infinipath) */ 790/* Receive Header Queue: receive type (from infinipath) */
@@ -781,7 +804,7 @@ struct ether_header {
781 */ 804 */
782static inline __u32 ipath_hdrget_err_flags(const __le32 * rbuf) 805static inline __u32 ipath_hdrget_err_flags(const __le32 * rbuf)
783{ 806{
784 return __le32_to_cpu(rbuf[1]); 807 return __le32_to_cpu(rbuf[1]) & INFINIPATH_RHF_H_ERR_MASK;
785} 808}
786 809
787static inline __u32 ipath_hdrget_rcv_type(const __le32 * rbuf) 810static inline __u32 ipath_hdrget_rcv_type(const __le32 * rbuf)
@@ -802,6 +825,23 @@ static inline __u32 ipath_hdrget_index(const __le32 * rbuf)
802 & INFINIPATH_RHF_EGRINDEX_MASK; 825 & INFINIPATH_RHF_EGRINDEX_MASK;
803} 826}
804 827
828static inline __u32 ipath_hdrget_seq(const __le32 *rbuf)
829{
830 return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_SEQ_SHIFT)
831 & INFINIPATH_RHF_SEQ_MASK;
832}
833
834static inline __u32 ipath_hdrget_offset(const __le32 *rbuf)
835{
836 return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_HDRQ_OFFSET_SHIFT)
837 & INFINIPATH_RHF_HDRQ_OFFSET_MASK;
838}
839
840static inline __u32 ipath_hdrget_use_egr_buf(const __le32 *rbuf)
841{
842 return __le32_to_cpu(rbuf[0]) & INFINIPATH_RHF_L_USE_EGR;
843}
844
805static inline __u32 ipath_hdrget_ipath_ver(__le32 hdrword) 845static inline __u32 ipath_hdrget_ipath_ver(__le32 hdrword)
806{ 846{
807 return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT) 847 return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)
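
The new ipath_hdrget_seq() and ipath_hdrget_offset() accessors follow the same shift-and-mask pattern as the existing header-flag helpers. A standalone sketch of the extraction, with masks and shifts mirrored from the constants added above (the sample word is invented):

#include <stdio.h>

#define RHF_SEQ_MASK		0xF	/* mirrors INFINIPATH_RHF_SEQ_MASK */
#define RHF_SEQ_SHIFT		0
#define RHF_HDRQ_OFFSET_MASK	0x7FF	/* mirrors ..._HDRQ_OFFSET_MASK */
#define RHF_HDRQ_OFFSET_SHIFT	4

int main(void)
{
	/* Hypothetical rbuf[1] word, already converted to CPU endianness. */
	unsigned int rbuf1 = 0x00000123;
	unsigned int seq = (rbuf1 >> RHF_SEQ_SHIFT) & RHF_SEQ_MASK;
	unsigned int offset = (rbuf1 >> RHF_HDRQ_OFFSET_SHIFT)
		& RHF_HDRQ_OFFSET_MASK;

	printf("seq=%u hdrq_offset=%u\n", seq, offset);	/* seq=3 offset=18 */
	return 0;
}
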
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
index d6f69532d83f..65926cd35759 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -66,6 +66,7 @@
66#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */ 66#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */
67#define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump */ 67#define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump */
68#define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump */ 68#define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump */
69#define __IPATH_LINKVERBDBG 0x200000 /* very verbose linkchange debug */
69 70
70#else /* _IPATH_DEBUGGING */ 71#else /* _IPATH_DEBUGGING */
71 72
@@ -89,6 +90,7 @@
89#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */ 90#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */
90#define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */ 91#define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */
91#define __IPATH_IPATHTABLE 0x0 /* Ethernet (IPATH) packet dump on */ 92#define __IPATH_IPATHTABLE 0x0 /* Ethernet (IPATH) packet dump on */
93#define __IPATH_LINKVERBDBG 0x0 /* very verbose linkchange debug */
92 94
93#endif /* _IPATH_DEBUGGING */ 95#endif /* _IPATH_DEBUGGING */
94 96
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 4137c7770f1b..6d49d2f18a88 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -330,13 +330,19 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
330 struct ipath_devdata *dd; 330 struct ipath_devdata *dd;
331 ssize_t ret = 0; 331 ssize_t ret = 0;
332 u64 val; 332 u64 val;
333 u32 l_state, lt_state; /* LinkState, LinkTrainingState */
333 334
334 if (count != sizeof(dp)) { 335 if (count < sizeof(odp)) {
335 ret = -EINVAL; 336 ret = -EINVAL;
336 goto bail; 337 goto bail;
337 } 338 }
338 339
339 if (copy_from_user(&dp, data, sizeof(dp))) { 340 if (count == sizeof(dp)) {
341 if (copy_from_user(&dp, data, sizeof(dp))) {
342 ret = -EFAULT;
343 goto bail;
344 }
345 } else if (copy_from_user(&odp, data, sizeof(odp))) {
340 ret = -EFAULT; 346 ret = -EFAULT;
341 goto bail; 347 goto bail;
342 } 348 }
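
ipath_diagpkt_write() now accepts either the legacy packet struct (odp) or the expanded one (dp), dispatching on count so old binaries keep working. A hedged userspace sketch of the pattern; the struct layouts and field names are invented stand-ins, and -1 stands in for the kernel's -EINVAL path:

#include <stdio.h>
#include <string.h>

/* Invented stand-ins: the new struct extends the old one at the end. */
struct old_pkt { unsigned unit; unsigned len; unsigned long long data; };
struct new_pkt { unsigned unit; unsigned len; unsigned long long data;
		 unsigned long long pbc_wd; };

static int parse_pkt(const void *buf, size_t count, struct new_pkt *dp)
{
	memset(dp, 0, sizeof(*dp));
	if (count == sizeof(struct new_pkt))
		memcpy(dp, buf, sizeof(struct new_pkt));   /* new ABI */
	else if (count >= sizeof(struct old_pkt))
		memcpy(dp, buf, sizeof(struct old_pkt));   /* old ABI: pbc_wd stays 0 */
	else
		return -1;                                 /* too short: -EINVAL */
	return 0;
}

int main(void)
{
	struct old_pkt op = { 1, 64, 0x1000 };
	struct new_pkt np;

	if (parse_pkt(&op, sizeof(op), &np) == 0)
		printf("unit=%u len=%u pbc_wd=%llu\n",
		       np.unit, np.len, np.pbc_wd);
	return 0;
}
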
@@ -396,10 +402,17 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
396 ret = -ENODEV; 402 ret = -ENODEV;
397 goto bail; 403 goto bail;
398 } 404 }
399 /* Check link state, but not if we have custom PBC */ 405 /*
400 val = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK; 406 * Want to skip check for l_state if using custom PBC,
401 if (!dp.pbc_wd && val != IPATH_IBSTATE_INIT && 407 * because we might be trying to force an SM packet out.
402 val != IPATH_IBSTATE_ARM && val != IPATH_IBSTATE_ACTIVE) { 408 * First cut: skip _all_ state checking in that case.
409 */
410 val = ipath_ib_state(dd, dd->ipath_lastibcstat);
411 lt_state = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
412 l_state = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
413 if (!dp.pbc_wd && (lt_state != INFINIPATH_IBCS_LT_STATE_LINKUP ||
414 (val != dd->ib_init && val != dd->ib_arm &&
415 val != dd->ib_active))) {
403 ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n", 416 ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
404 dd->ipath_unit, (unsigned long long) val); 417 dd->ipath_unit, (unsigned long long) val);
405 ret = -EINVAL; 418 ret = -EINVAL;
@@ -431,15 +444,17 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
431 goto bail; 444 goto bail;
432 } 445 }
433 446
434 piobuf = ipath_getpiobuf(dd, &pbufn); 447 plen >>= 2; /* in dwords */
448
449 piobuf = ipath_getpiobuf(dd, plen, &pbufn);
435 if (!piobuf) { 450 if (!piobuf) {
436 ipath_cdbg(VERBOSE, "No PIO buffers avail for unit %u\n", 451 ipath_cdbg(VERBOSE, "No PIO buffers avail for unit %u\n",
437 dd->ipath_unit); 452 dd->ipath_unit);
438 ret = -EBUSY; 453 ret = -EBUSY;
439 goto bail; 454 goto bail;
440 } 455 }
441 456 /* disarm it just to be extra sure */
442 plen >>= 2; /* in dwords */ 457 ipath_disarm_piobufs(dd, pbufn, 1);
443 458
444 if (ipath_debug & __IPATH_PKTDBG) 459 if (ipath_debug & __IPATH_PKTDBG)
445 ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n", 460 ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index ca4d0acc6786..e0a64f070b97 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -41,7 +41,6 @@
41 41
42#include "ipath_kernel.h" 42#include "ipath_kernel.h"
43#include "ipath_verbs.h" 43#include "ipath_verbs.h"
44#include "ipath_common.h"
45 44
46static void ipath_update_pio_bufs(struct ipath_devdata *); 45static void ipath_update_pio_bufs(struct ipath_devdata *);
47 46
@@ -73,10 +72,27 @@ module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
73MODULE_PARM_DESC(debug, "mask for debug prints"); 72MODULE_PARM_DESC(debug, "mask for debug prints");
74EXPORT_SYMBOL_GPL(ipath_debug); 73EXPORT_SYMBOL_GPL(ipath_debug);
75 74
75unsigned ipath_mtu4096 = 1; /* max 4KB IB mtu by default, if supported */
76module_param_named(mtu4096, ipath_mtu4096, uint, S_IRUGO);
77MODULE_PARM_DESC(mtu4096, "enable MTU of 4096 bytes, if supported");
78
79static unsigned ipath_hol_timeout_ms = 13000;
80module_param_named(hol_timeout_ms, ipath_hol_timeout_ms, uint, S_IRUGO);
81MODULE_PARM_DESC(hol_timeout_ms,
82 "duration of user app suspension after link failure");
83
84unsigned ipath_linkrecovery = 1;
85module_param_named(linkrecovery, ipath_linkrecovery, uint, S_IWUSR | S_IRUGO);
86MODULE_PARM_DESC(linkrecovery, "enable workaround for link recovery issue");
87
76MODULE_LICENSE("GPL"); 88MODULE_LICENSE("GPL");
77MODULE_AUTHOR("QLogic <support@pathscale.com>"); 89MODULE_AUTHOR("QLogic <support@qlogic.com>");
78MODULE_DESCRIPTION("QLogic InfiniPath driver"); 90MODULE_DESCRIPTION("QLogic InfiniPath driver");
79 91
92/*
93 * Table to translate the LINKTRAININGSTATE portion of
94 * IBCStatus to a human-readable form.
95 */
80const char *ipath_ibcstatus_str[] = { 96const char *ipath_ibcstatus_str[] = {
81 "Disabled", 97 "Disabled",
82 "LinkUp", 98 "LinkUp",
@@ -91,9 +107,20 @@ const char *ipath_ibcstatus_str[] = {
91 "CfgWaitRmt", 107 "CfgWaitRmt",
92 "CfgIdle", 108 "CfgIdle",
93 "RecovRetrain", 109 "RecovRetrain",
94 "LState0xD", /* unused */ 110 "CfgTxRevLane", /* unused before IBA7220 */
95 "RecovWaitRmt", 111 "RecovWaitRmt",
96 "RecovIdle", 112 "RecovIdle",
113 /* below were added for IBA7220 */
114 "CfgEnhanced",
115 "CfgTest",
116 "CfgWaitRmtTest",
117 "CfgWaitCfgEnhanced",
118 "SendTS_T",
119 "SendTstIdles",
120 "RcvTS_T",
121 "SendTst_TS1s",
122 "LTState18", "LTState19", "LTState1A", "LTState1B",
123 "LTState1C", "LTState1D", "LTState1E", "LTState1F"
97}; 124};
98 125
99static void __devexit ipath_remove_one(struct pci_dev *); 126static void __devexit ipath_remove_one(struct pci_dev *);
@@ -102,8 +129,10 @@ static int __devinit ipath_init_one(struct pci_dev *,
102 129
103/* Only needed for registration, nothing else needs this info */ 130/* Only needed for registration, nothing else needs this info */
104#define PCI_VENDOR_ID_PATHSCALE 0x1fc1 131#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
132#define PCI_VENDOR_ID_QLOGIC 0x1077
105#define PCI_DEVICE_ID_INFINIPATH_HT 0xd 133#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
106#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10 134#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
135#define PCI_DEVICE_ID_INFINIPATH_7220 0x7220
107 136
108/* Number of seconds before our card status check... */ 137/* Number of seconds before our card status check... */
109#define STATUS_TIMEOUT 60 138#define STATUS_TIMEOUT 60
@@ -111,6 +140,7 @@ static int __devinit ipath_init_one(struct pci_dev *,
111static const struct pci_device_id ipath_pci_tbl[] = { 140static const struct pci_device_id ipath_pci_tbl[] = {
112 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) }, 141 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
113 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) }, 142 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
143 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_INFINIPATH_7220) },
114 { 0, } 144 { 0, }
115}; 145};
116 146
@@ -126,19 +156,6 @@ static struct pci_driver ipath_driver = {
126 }, 156 },
127}; 157};
128 158
129static void ipath_check_status(struct work_struct *work)
130{
131 struct ipath_devdata *dd = container_of(work, struct ipath_devdata,
132 status_work.work);
133
134 /*
135 * If we don't have any interrupts, let the user know and
136 * don't bother checking again.
137 */
138 if (dd->ipath_int_counter == 0)
139 dev_err(&dd->pcidev->dev, "No interrupts detected.\n");
140}
141
142static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev, 159static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
143 u32 *bar0, u32 *bar1) 160 u32 *bar0, u32 *bar1)
144{ 161{
@@ -206,8 +223,6 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
206 dd->pcidev = pdev; 223 dd->pcidev = pdev;
207 pci_set_drvdata(pdev, dd); 224 pci_set_drvdata(pdev, dd);
208 225
209 INIT_DELAYED_WORK(&dd->status_work, ipath_check_status);
210
211 list_add(&dd->ipath_list, &ipath_dev_list); 226 list_add(&dd->ipath_list, &ipath_dev_list);
212 227
213bail_unlock: 228bail_unlock:
@@ -234,12 +249,12 @@ struct ipath_devdata *ipath_lookup(int unit)
234 return dd; 249 return dd;
235} 250}
236 251
237int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp) 252int ipath_count_units(int *npresentp, int *nupp, int *maxportsp)
238{ 253{
239 int nunits, npresent, nup; 254 int nunits, npresent, nup;
240 struct ipath_devdata *dd; 255 struct ipath_devdata *dd;
241 unsigned long flags; 256 unsigned long flags;
242 u32 maxports; 257 int maxports;
243 258
244 nunits = npresent = nup = maxports = 0; 259 nunits = npresent = nup = maxports = 0;
245 260
@@ -304,7 +319,7 @@ static void ipath_verify_pioperf(struct ipath_devdata *dd)
304 u32 *addr; 319 u32 *addr;
305 u64 msecs, emsecs; 320 u64 msecs, emsecs;
306 321
307 piobuf = ipath_getpiobuf(dd, &pbnum); 322 piobuf = ipath_getpiobuf(dd, 0, &pbnum);
308 if (!piobuf) { 323 if (!piobuf) {
309 dev_info(&dd->pcidev->dev, 324 dev_info(&dd->pcidev->dev,
310 "No PIObufs for checking perf, skipping\n"); 325 "No PIObufs for checking perf, skipping\n");
@@ -336,7 +351,14 @@ static void ipath_verify_pioperf(struct ipath_devdata *dd)
336 351
337 ipath_disable_armlaunch(dd); 352 ipath_disable_armlaunch(dd);
338 353
339 writeq(0, piobuf); /* length 0, no dwords actually sent */ 354 /*
355 * length 0, no dwords actually sent, and mark as VL15
356 * on chips where that may matter (due to IB flowcontrol)
357 */
358 if ((dd->ipath_flags & IPATH_HAS_PBC_CNT))
359 writeq(1UL << 63, piobuf);
360 else
361 writeq(0, piobuf);
340 ipath_flush_wc(); 362 ipath_flush_wc();
341 363
342 /* 364 /*
@@ -377,6 +399,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
377 struct ipath_devdata *dd; 399 struct ipath_devdata *dd;
378 unsigned long long addr; 400 unsigned long long addr;
379 u32 bar0 = 0, bar1 = 0; 401 u32 bar0 = 0, bar1 = 0;
402 u8 rev;
380 403
381 dd = ipath_alloc_devdata(pdev); 404 dd = ipath_alloc_devdata(pdev);
382 if (IS_ERR(dd)) { 405 if (IS_ERR(dd)) {
@@ -408,7 +431,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
408 } 431 }
409 addr = pci_resource_start(pdev, 0); 432 addr = pci_resource_start(pdev, 0);
410 len = pci_resource_len(pdev, 0); 433 len = pci_resource_len(pdev, 0);
411 ipath_cdbg(VERBOSE, "regbase (0) %llx len %d pdev->irq %d, vend %x/%x " 434 ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %d, vend %x/%x "
412 "driver_data %lx\n", addr, len, pdev->irq, ent->vendor, 435 "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
413 ent->device, ent->driver_data); 436 ent->device, ent->driver_data);
414 437
@@ -512,6 +535,13 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
512 "CONFIG_PCI_MSI is not enabled\n", ent->device); 535 "CONFIG_PCI_MSI is not enabled\n", ent->device);
513 return -ENODEV; 536 return -ENODEV;
514#endif 537#endif
538 case PCI_DEVICE_ID_INFINIPATH_7220:
539#ifndef CONFIG_PCI_MSI
540 ipath_dbg("CONFIG_PCI_MSI is not enabled, "
541 "using IntX for unit %u\n", dd->ipath_unit);
542#endif
543 ipath_init_iba7220_funcs(dd);
544 break;
515 default: 545 default:
516 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, " 546 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
517 "failing\n", ent->device); 547 "failing\n", ent->device);
@@ -533,7 +563,13 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
533 goto bail_regions; 563 goto bail_regions;
534 } 564 }
535 565
536 dd->ipath_pcirev = pdev->revision; 566 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
567 if (ret) {
568 ipath_dev_err(dd, "Failed to read PCI revision ID unit "
569 "%u: err %d\n", dd->ipath_unit, -ret);
570 goto bail_regions; /* shouldn't ever happen */
571 }
572 dd->ipath_pcirev = rev;
537 573
538#if defined(__powerpc__) 574#if defined(__powerpc__)
539 /* There isn't a generic way to specify writethrough mappings */ 575 /* There isn't a generic way to specify writethrough mappings */
@@ -556,14 +592,6 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
556 ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n", 592 ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
557 addr, dd->ipath_kregbase); 593 addr, dd->ipath_kregbase);
558 594
559 /*
560 * clear ipath_flags here instead of in ipath_init_chip as it is set
561 * by ipath_setup_htconfig.
562 */
563 dd->ipath_flags = 0;
564 dd->ipath_lli_counter = 0;
565 dd->ipath_lli_errors = 0;
566
567 if (dd->ipath_f_bus(dd, pdev)) 595 if (dd->ipath_f_bus(dd, pdev))
568 ipath_dev_err(dd, "Failed to setup config space; " 596 ipath_dev_err(dd, "Failed to setup config space; "
569 "continuing anyway\n"); 597 "continuing anyway\n");
@@ -608,13 +636,11 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
608 ipath_diag_add(dd); 636 ipath_diag_add(dd);
609 ipath_register_ib_device(dd); 637 ipath_register_ib_device(dd);
610 638
611 /* Check that card status in STATUS_TIMEOUT seconds. */
612 schedule_delayed_work(&dd->status_work, HZ * STATUS_TIMEOUT);
613
614 goto bail; 639 goto bail;
615 640
616bail_irqsetup: 641bail_irqsetup:
617 if (pdev->irq) free_irq(pdev->irq, dd); 642 if (pdev->irq)
643 free_irq(pdev->irq, dd);
618 644
619bail_iounmap: 645bail_iounmap:
620 iounmap((volatile void __iomem *) dd->ipath_kregbase); 646 iounmap((volatile void __iomem *) dd->ipath_kregbase);
@@ -654,6 +680,10 @@ static void __devexit cleanup_device(struct ipath_devdata *dd)
654 ipath_disable_wc(dd); 680 ipath_disable_wc(dd);
655 } 681 }
656 682
683 if (dd->ipath_spectriggerhit)
684 dev_info(&dd->pcidev->dev, "%lu special trigger hits\n",
685 dd->ipath_spectriggerhit);
686
657 if (dd->ipath_pioavailregs_dma) { 687 if (dd->ipath_pioavailregs_dma) {
658 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, 688 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
659 (void *) dd->ipath_pioavailregs_dma, 689 (void *) dd->ipath_pioavailregs_dma,
@@ -706,6 +736,8 @@ static void __devexit cleanup_device(struct ipath_devdata *dd)
706 tmpp = dd->ipath_pageshadow; 736 tmpp = dd->ipath_pageshadow;
707 dd->ipath_pageshadow = NULL; 737 dd->ipath_pageshadow = NULL;
708 vfree(tmpp); 738 vfree(tmpp);
739
740 dd->ipath_egrtidbase = NULL;
709 } 741 }
710 742
711 /* 743 /*
@@ -738,7 +770,6 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
738 */ 770 */
739 ipath_shutdown_device(dd); 771 ipath_shutdown_device(dd);
740 772
741 cancel_delayed_work(&dd->status_work);
742 flush_scheduled_work(); 773 flush_scheduled_work();
743 774
744 if (dd->verbs_dev) 775 if (dd->verbs_dev)
@@ -823,20 +854,8 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
823 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 854 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
824 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); 855 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
825 } 856 }
826 857 /* on some older chips, update may not happen after cancel */
827 /* 858 ipath_force_pio_avail_update(dd);
828 * Disable PIOAVAILUPD, then re-enable, reading scratch in
829 * between. This seems to avoid a chip timing race that causes
830 * pioavail updates to memory to stop. We xor as we don't
831 * know the state of the bit when we're called.
832 */
833 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
834 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
835 dd->ipath_sendctrl ^ INFINIPATH_S_PIOBUFAVAILUPD);
836 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
837 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
838 dd->ipath_sendctrl);
839 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
840} 859}
841 860
842/** 861/**
@@ -873,18 +892,52 @@ int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
873 (unsigned long long) ipath_read_kreg64( 892 (unsigned long long) ipath_read_kreg64(
874 dd, dd->ipath_kregs->kr_ibcctrl), 893 dd, dd->ipath_kregs->kr_ibcctrl),
875 (unsigned long long) val, 894 (unsigned long long) val,
876 ipath_ibcstatus_str[val & 0xf]); 895 ipath_ibcstatus_str[val & dd->ibcs_lts_mask]);
877 } 896 }
878 return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT; 897 return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
879} 898}
880 899
900static void decode_sdma_errs(struct ipath_devdata *dd, ipath_err_t err,
901 char *buf, size_t blen)
902{
903 static const struct {
904 ipath_err_t err;
905 const char *msg;
906 } errs[] = {
907 { INFINIPATH_E_SDMAGENMISMATCH, "SDmaGenMismatch" },
908 { INFINIPATH_E_SDMAOUTOFBOUND, "SDmaOutOfBound" },
909 { INFINIPATH_E_SDMATAILOUTOFBOUND, "SDmaTailOutOfBound" },
910 { INFINIPATH_E_SDMABASE, "SDmaBase" },
911 { INFINIPATH_E_SDMA1STDESC, "SDma1stDesc" },
912 { INFINIPATH_E_SDMARPYTAG, "SDmaRpyTag" },
913 { INFINIPATH_E_SDMADWEN, "SDmaDwEn" },
914 { INFINIPATH_E_SDMAMISSINGDW, "SDmaMissingDw" },
915 { INFINIPATH_E_SDMAUNEXPDATA, "SDmaUnexpData" },
916 { INFINIPATH_E_SDMADESCADDRMISALIGN, "SDmaDescAddrMisalign" },
917 { INFINIPATH_E_SENDBUFMISUSE, "SendBufMisuse" },
918 { INFINIPATH_E_SDMADISABLED, "SDmaDisabled" },
919 };
920 int i;
921 int expected;
922 size_t bidx = 0;
923
924 for (i = 0; i < ARRAY_SIZE(errs); i++) {
925 expected = (errs[i].err != INFINIPATH_E_SDMADISABLED) ? 0 :
926 test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
927 if ((err & errs[i].err) && !expected)
928 bidx += snprintf(buf + bidx, blen - bidx,
929 "%s ", errs[i].msg);
930 }
931}
932
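Annotation: decode_sdma_errs() above appends a name for every set error bit, driven by a static table. The same idiom as standalone user-space C, with made-up flag values, and with the snprintf bookkeeping clamped so the write offset can never run past the buffer:

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	/* hypothetical flag bits, for illustration only */
	#define ERR_GEN_MISMATCH (1ULL << 0)
	#define ERR_OUT_OF_BOUND (1ULL << 1)

	static void decode_errs(uint64_t err, char *buf, size_t blen)
	{
		static const struct { uint64_t bit; const char *msg; } tab[] = {
			{ ERR_GEN_MISMATCH, "GenMismatch" },
			{ ERR_OUT_OF_BOUND, "OutOfBound" },
		};
		size_t i, bidx = 0;

		buf[0] = '\0';
		for (i = 0; i < ARRAY_SIZE(tab); i++) {
			if (!(err & tab[i].bit))
				continue;
			/* clamp: snprintf returns the would-be length */
			bidx += snprintf(buf + bidx, blen - bidx, "%s ", tab[i].msg);
			if (bidx >= blen)
				break;
		}
	}

	int main(void)
	{
		char buf[64];

		decode_errs(ERR_GEN_MISMATCH | ERR_OUT_OF_BOUND, buf, sizeof(buf));
		printf("%s\n", buf);	/* prints "GenMismatch OutOfBound " */
		return 0;
	}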
881/* 933/*
882 * Decode the error status into strings, deciding whether to always 934 * Decode the error status into strings, deciding whether to always
883 * print it or not depending on "normal packet errors" vs everything 935 * print it or not depending on "normal packet errors" vs everything
884 * else. Return 1 if "real" errors, otherwise 0 if only packet 936 * else. Return 1 if "real" errors, otherwise 0 if only packet
885 * errors, so caller can decide what to print with the string. 937 * errors, so caller can decide what to print with the string.
886 */ 938 */
887int ipath_decode_err(char *buf, size_t blen, ipath_err_t err) 939int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
940 ipath_err_t err)
888{ 941{
889 int iserr = 1; 942 int iserr = 1;
890 *buf = '\0'; 943 *buf = '\0';
@@ -922,6 +975,8 @@ int ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
922 strlcat(buf, "rbadversion ", blen); 975 strlcat(buf, "rbadversion ", blen);
923 if (err & INFINIPATH_E_RHDR) 976 if (err & INFINIPATH_E_RHDR)
924 strlcat(buf, "rhdr ", blen); 977 strlcat(buf, "rhdr ", blen);
978 if (err & INFINIPATH_E_SENDSPECIALTRIGGER)
979 strlcat(buf, "sendspecialtrigger ", blen);
925 if (err & INFINIPATH_E_RLONGPKTLEN) 980 if (err & INFINIPATH_E_RLONGPKTLEN)
926 strlcat(buf, "rlongpktlen ", blen); 981 strlcat(buf, "rlongpktlen ", blen);
927 if (err & INFINIPATH_E_RMAXPKTLEN) 982 if (err & INFINIPATH_E_RMAXPKTLEN)
@@ -964,6 +1019,10 @@ int ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
964 strlcat(buf, "hardware ", blen); 1019 strlcat(buf, "hardware ", blen);
965 if (err & INFINIPATH_E_RESET) 1020 if (err & INFINIPATH_E_RESET)
966 strlcat(buf, "reset ", blen); 1021 strlcat(buf, "reset ", blen);
1022 if (err & INFINIPATH_E_SDMAERRS)
1023 decode_sdma_errs(dd, err, buf, blen);
1024 if (err & INFINIPATH_E_INVALIDEEPCMD)
1025 strlcat(buf, "invalideepromcmd ", blen);
967done: 1026done:
968 return iserr; 1027 return iserr;
969} 1028}
@@ -1076,18 +1135,17 @@ static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
1076 u32 eflags, 1135 u32 eflags,
1077 u32 l, 1136 u32 l,
1078 u32 etail, 1137 u32 etail,
1079 u64 *rc) 1138 __le32 *rhf_addr,
1139 struct ipath_message_header *hdr)
1080{ 1140{
1081 char emsg[128]; 1141 char emsg[128];
1082 struct ipath_message_header *hdr;
1083 1142
1084 get_rhf_errstring(eflags, emsg, sizeof emsg); 1143 get_rhf_errstring(eflags, emsg, sizeof emsg);
1085 hdr = (struct ipath_message_header *)&rc[1];
1086 ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u " 1144 ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
1087 "tlen=%x opcode=%x egridx=%x: %s\n", 1145 "tlen=%x opcode=%x egridx=%x: %s\n",
1088 eflags, l, 1146 eflags, l,
1089 ipath_hdrget_rcv_type((__le32 *) rc), 1147 ipath_hdrget_rcv_type(rhf_addr),
1090 ipath_hdrget_length_in_bytes((__le32 *) rc), 1148 ipath_hdrget_length_in_bytes(rhf_addr),
1091 be32_to_cpu(hdr->bth[0]) >> 24, 1149 be32_to_cpu(hdr->bth[0]) >> 24,
1092 etail, emsg); 1150 etail, emsg);
1093 1151
@@ -1112,55 +1170,52 @@ static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
1112 */ 1170 */
1113void ipath_kreceive(struct ipath_portdata *pd) 1171void ipath_kreceive(struct ipath_portdata *pd)
1114{ 1172{
1115 u64 *rc;
1116 struct ipath_devdata *dd = pd->port_dd; 1173 struct ipath_devdata *dd = pd->port_dd;
1174 __le32 *rhf_addr;
1117 void *ebuf; 1175 void *ebuf;
1118 const u32 rsize = dd->ipath_rcvhdrentsize; /* words */ 1176 const u32 rsize = dd->ipath_rcvhdrentsize; /* words */
1119 const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */ 1177 const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
1120 u32 etail = -1, l, hdrqtail; 1178 u32 etail = -1, l, hdrqtail;
1121 struct ipath_message_header *hdr; 1179 struct ipath_message_header *hdr;
1122 u32 eflags, i, etype, tlen, pkttot = 0, updegr=0, reloop=0; 1180 u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
1123 static u64 totcalls; /* stats, may eventually remove */ 1181 static u64 totcalls; /* stats, may eventually remove */
1124 1182 int last;
1125 if (!dd->ipath_hdrqtailptr) {
1126 ipath_dev_err(dd,
1127 "hdrqtailptr not set, can't do receives\n");
1128 goto bail;
1129 }
1130 1183
1131 l = pd->port_head; 1184 l = pd->port_head;
1132 hdrqtail = ipath_get_rcvhdrtail(pd); 1185 rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
1133 if (l == hdrqtail) 1186 if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
1134 goto bail; 1187 u32 seq = ipath_hdrget_seq(rhf_addr);
1135
1136reloop:
1137 for (i = 0; l != hdrqtail; i++) {
1138 u32 qp;
1139 u8 *bthbytes;
1140 1188
1141 rc = (u64 *) (pd->port_rcvhdrq + (l << 2)); 1189 if (seq != pd->port_seq_cnt)
1142 hdr = (struct ipath_message_header *)&rc[1]; 1190 goto bail;
1143 /* 1191 hdrqtail = 0;
1144 * could make a network order version of IPATH_KD_QP, and 1192 } else {
1145 * do the obvious shift before masking to speed this up. 1193 hdrqtail = ipath_get_rcvhdrtail(pd);
1146 */ 1194 if (l == hdrqtail)
1147 qp = ntohl(hdr->bth[1]) & 0xffffff; 1195 goto bail;
1148 bthbytes = (u8 *) hdr->bth; 1196 smp_rmb();
1197 }
1149 1198
1150 eflags = ipath_hdrget_err_flags((__le32 *) rc); 1199reloop:
1151 etype = ipath_hdrget_rcv_type((__le32 *) rc); 1200 for (last = 0, i = 1; !last; i++) {
1201 hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
1202 eflags = ipath_hdrget_err_flags(rhf_addr);
1203 etype = ipath_hdrget_rcv_type(rhf_addr);
1152 /* total length */ 1204 /* total length */
1153 tlen = ipath_hdrget_length_in_bytes((__le32 *) rc); 1205 tlen = ipath_hdrget_length_in_bytes(rhf_addr);
1154 ebuf = NULL; 1206 ebuf = NULL;
1155 if (etype != RCVHQ_RCV_TYPE_EXPECTED) { 1207 if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
1208 ipath_hdrget_use_egr_buf(rhf_addr) :
1209 (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
1156 /* 1210 /*
1157 * it turns out that the chips uses an eager buffer 1211 * It turns out that the chip uses an eager buffer
1158 * for all non-expected packets, whether it "needs" 1212 * for all non-expected packets, whether it "needs"
1159 * one or not. So always get the index, but don't 1213 * one or not. So always get the index, but don't
1160 * set ebuf (so we try to copy data) unless the 1214 * set ebuf (so we try to copy data) unless the
1161 * length requires it. 1215 * length requires it.
1162 */ 1216 */
1163 etail = ipath_hdrget_index((__le32 *) rc); 1217 etail = ipath_hdrget_index(rhf_addr);
1218 updegr = 1;
1164 if (tlen > sizeof(*hdr) || 1219 if (tlen > sizeof(*hdr) ||
1165 etype == RCVHQ_RCV_TYPE_NON_KD) 1220 etype == RCVHQ_RCV_TYPE_NON_KD)
1166 ebuf = ipath_get_egrbuf(dd, etail); 1221 ebuf = ipath_get_egrbuf(dd, etail);
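Annotation: the new IPATH_NODMA_RTAIL path has no DMA'd tail pointer. Instead, each receive-header-flags (RHF) entry carries a hardware-written sequence number that cycles 1..13, and an entry is fresh only when it matches the software counter. A compilable sketch of just that arrival test, with a hypothetical field extractor:

	#include <stdint.h>

	/* hypothetical: extract the hw-written sequence field from an RHF word */
	static unsigned rhf_seq(uint32_t rhf) { return rhf & 0xf; }

	/*
	 * Returns nonzero if the head entry holds a new packet: the hardware
	 * writes seq values 1..13 in order, so a match against the expected
	 * counter means "fresh"; anything else means "not written yet".
	 */
	static int entry_is_new(uint32_t rhf, unsigned *expected)
	{
		if (rhf_seq(rhf) != *expected)
			return 0;
		if (++(*expected) > 13)	/* wrap 13 -> 1, never 0 */
			*expected = 1;
		return 1;
	}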
@@ -1171,75 +1226,91 @@ reloop:
1171 * packets; only ipathhdrerr should be set. 1226 * packets; only ipathhdrerr should be set.
1172 */ 1227 */
1173 1228
1174 if (etype != RCVHQ_RCV_TYPE_NON_KD && etype != 1229 if (etype != RCVHQ_RCV_TYPE_NON_KD &&
1175 RCVHQ_RCV_TYPE_ERROR && ipath_hdrget_ipath_ver( 1230 etype != RCVHQ_RCV_TYPE_ERROR &&
1176 hdr->iph.ver_port_tid_offset) != 1231 ipath_hdrget_ipath_ver(hdr->iph.ver_port_tid_offset) !=
1177 IPS_PROTO_VERSION) { 1232 IPS_PROTO_VERSION)
1178 ipath_cdbg(PKT, "Bad InfiniPath protocol version " 1233 ipath_cdbg(PKT, "Bad InfiniPath protocol version "
1179 "%x\n", etype); 1234 "%x\n", etype);
1180 }
1181 1235
1182 if (unlikely(eflags)) 1236 if (unlikely(eflags))
1183 ipath_rcv_hdrerr(dd, eflags, l, etail, rc); 1237 ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
1184 else if (etype == RCVHQ_RCV_TYPE_NON_KD) { 1238 else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
1185 ipath_ib_rcv(dd->verbs_dev, rc + 1, ebuf, tlen); 1239 ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
1186 if (dd->ipath_lli_counter) 1240 if (dd->ipath_lli_counter)
1187 dd->ipath_lli_counter--; 1241 dd->ipath_lli_counter--;
1242 } else if (etype == RCVHQ_RCV_TYPE_EAGER) {
1243 u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24;
1244 u32 qp = be32_to_cpu(hdr->bth[1]) & 0xffffff;
1188 ipath_cdbg(PKT, "typ %x, opcode %x (eager, " 1245 ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
1189 "qp=%x), len %x; ignored\n", 1246 "qp=%x), len %x; ignored\n",
1190 etype, bthbytes[0], qp, tlen); 1247 etype, opcode, qp, tlen);
1191 } 1248 }
1192 else if (etype == RCVHQ_RCV_TYPE_EAGER)
1193 ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
1194 "qp=%x), len %x; ignored\n",
1195 etype, bthbytes[0], qp, tlen);
1196 else if (etype == RCVHQ_RCV_TYPE_EXPECTED) 1249 else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
1197 ipath_dbg("Bug: Expected TID, opcode %x; ignored\n", 1250 ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
1198 be32_to_cpu(hdr->bth[0]) & 0xff); 1251 be32_to_cpu(hdr->bth[0]) >> 24);
1199 else { 1252 else {
1200 /* 1253 /*
1201 * error packet, type of error unknown. 1254 * error packet, type of error unknown.
1202 * Probably type 3, but we don't know, so don't 1255 * Probably type 3, but we don't know, so don't
1203 * even try to print the opcode, etc. 1256 * even try to print the opcode, etc.
1257 * Usually caused by a "bad packet", that has no
1258 * BTH, when the LRH says it should.
1204 */ 1259 */
1205 ipath_dbg("Error Pkt, but no eflags! egrbuf %x, " 1260 ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
1206 "len %x\nhdrq@%lx;hdrq+%x rhf: %llx; " 1261 " %x, len %x hdrq+%x rhf: %Lx\n",
1207 "hdr %llx %llx %llx %llx %llx\n", 1262 etail, tlen, l,
1208 etail, tlen, (unsigned long) rc, l, 1263 le64_to_cpu(*(__le64 *) rhf_addr));
1209 (unsigned long long) rc[0], 1264 if (ipath_debug & __IPATH_ERRPKTDBG) {
1210 (unsigned long long) rc[1], 1265 u32 j, *d, dw = rsize-2;
1211 (unsigned long long) rc[2], 1266 if (rsize > (tlen>>2))
1212 (unsigned long long) rc[3], 1267 dw = tlen>>2;
1213 (unsigned long long) rc[4], 1268 d = (u32 *)hdr;
1214 (unsigned long long) rc[5]); 1269 printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n",
1270 dw);
1271 for (j = 0; j < dw; j++)
1272 printk(KERN_DEBUG "%8x%s", d[j],
1273 (j%8) == 7 ? "\n" : " ");
1274 printk(KERN_DEBUG ".\n");
1275 }
1215 } 1276 }
1216 l += rsize; 1277 l += rsize;
1217 if (l >= maxcnt) 1278 if (l >= maxcnt)
1218 l = 0; 1279 l = 0;
1219 if (etype != RCVHQ_RCV_TYPE_EXPECTED) 1280 rhf_addr = (__le32 *) pd->port_rcvhdrq +
1220 updegr = 1; 1281 l + dd->ipath_rhf_offset;
1282 if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
1283 u32 seq = ipath_hdrget_seq(rhf_addr);
1284
1285 if (++pd->port_seq_cnt > 13)
1286 pd->port_seq_cnt = 1;
1287 if (seq != pd->port_seq_cnt)
1288 last = 1;
1289 } else if (l == hdrqtail)
1290 last = 1;
1221 /* 1291 /*
1222 * update head regs on last packet, and every 16 packets. 1292 * update head regs on last packet, and every 16 packets.
1223 * Reduce bus traffic, while still trying to prevent 1293 * Reduce bus traffic, while still trying to prevent
1224 * rcvhdrq overflows, for when the queue is nearly full 1294 * rcvhdrq overflows, for when the queue is nearly full
1225 */ 1295 */
1226 if (l == hdrqtail || (i && !(i&0xf))) { 1296 if (last || !(i & 0xf)) {
1227 u64 lval; 1297 u64 lval = l;
1228 if (l == hdrqtail) 1298
1229 /* request IBA6120 interrupt only on last */ 1299 /* request IBA6120 and 7220 interrupt only on last */
1230 lval = dd->ipath_rhdrhead_intr_off | l; 1300 if (last)
1231 else 1301 lval |= dd->ipath_rhdrhead_intr_off;
1232 lval = l; 1302 ipath_write_ureg(dd, ur_rcvhdrhead, lval,
1233 (void)ipath_write_ureg(dd, ur_rcvhdrhead, lval, 0); 1303 pd->port_port);
1234 if (updegr) { 1304 if (updegr) {
1235 (void)ipath_write_ureg(dd, ur_rcvegrindexhead, 1305 ipath_write_ureg(dd, ur_rcvegrindexhead,
1236 etail, 0); 1306 etail, pd->port_port);
1237 updegr = 0; 1307 updegr = 0;
1238 } 1308 }
1239 } 1309 }
1240 } 1310 }
1241 1311
1242 if (!dd->ipath_rhdrhead_intr_off && !reloop) { 1312 if (!dd->ipath_rhdrhead_intr_off && !reloop &&
1313 !(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
1243 /* IBA6110 workaround; we can have a race clearing chip 1314 /* IBA6110 workaround; we can have a race clearing chip
1244 * interrupt with another interrupt about to be delivered, 1315 * interrupt with another interrupt about to be delivered,
1245 * and can clear it before it is delivered on the GPIO 1316 * and can clear it before it is delivered on the GPIO
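Annotation: worth noting in the receive loop above, the head register is now written only on the last packet and on every 16th one, which keeps rcvhdrq from overflowing while cutting MMIO traffic. The bare shape of that idiom, with a hypothetical stubbed-out register write:

	/* sketch: throttle an expensive head-pointer write to every 16th entry */
	static void example_write_head(unsigned head) { (void)head; /* MMIO in real code */ }

	static void example_consume_ring(unsigned head, unsigned tail, unsigned ringsz)
	{
		unsigned i;

		for (i = 1; head != tail; i++) {
			/* ... process the entry at "head" ... */
			head = (head + 1) % ringsz;
			if (head == tail || !(i & 0xf))
				example_write_head(head);
		}
	}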
@@ -1301,7 +1372,6 @@ static void ipath_update_pio_bufs(struct ipath_devdata *dd)
1301 * happens when all buffers are in use, so only cpu overhead, not 1372 * happens when all buffers are in use, so only cpu overhead, not
1302 * latency or bandwidth is affected. 1373 * latency or bandwidth is affected.
1303 */ 1374 */
1304#define _IPATH_ALL_CHECKBITS 0x5555555555555555ULL
1305 if (!dd->ipath_pioavailregs_dma) { 1375 if (!dd->ipath_pioavailregs_dma) {
1306 ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n"); 1376 ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
1307 return; 1377 return;
@@ -1346,7 +1416,7 @@ static void ipath_update_pio_bufs(struct ipath_devdata *dd)
1346 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]); 1416 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
1347 else 1417 else
1348 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]); 1418 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
1349 pchg = _IPATH_ALL_CHECKBITS & 1419 pchg = dd->ipath_pioavailkernel[i] &
1350 ~(dd->ipath_pioavailshadow[i] ^ piov); 1420 ~(dd->ipath_pioavailshadow[i] ^ piov);
1351 pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT; 1421 pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
1352 if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) { 1422 if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
@@ -1397,27 +1467,63 @@ int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
1397 return ret; 1467 return ret;
1398} 1468}
1399 1469
1400/** 1470/*
1401 * ipath_getpiobuf - find an available pio buffer 1471 * debugging code and stats updates if no pio buffers available.
1402 * @dd: the infinipath device 1472 */
1403 * @pbufnum: the buffer number is placed here 1473static noinline void no_pio_bufs(struct ipath_devdata *dd)
1474{
1475 unsigned long *shadow = dd->ipath_pioavailshadow;
1476 __le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;
1477
1478 dd->ipath_upd_pio_shadow = 1;
1479
1480 /*
1481 * not atomic, but if we lose a stat count in a while, that's OK
1482 */
1483 ipath_stats.sps_nopiobufs++;
1484 if (!(++dd->ipath_consec_nopiobuf % 100000)) {
1485 ipath_dbg("%u pio sends with no bufavail; dmacopy: "
1486 "%llx %llx %llx %llx; shadow: %lx %lx %lx %lx\n",
1487 dd->ipath_consec_nopiobuf,
1488 (unsigned long long) le64_to_cpu(dma[0]),
1489 (unsigned long long) le64_to_cpu(dma[1]),
1490 (unsigned long long) le64_to_cpu(dma[2]),
1491 (unsigned long long) le64_to_cpu(dma[3]),
1492 shadow[0], shadow[1], shadow[2], shadow[3]);
1493 /*
1494 * 4 buffers per byte, 4 registers above, cover rest
1495 * below
1496 */
1497 if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
1498 (sizeof(shadow[0]) * 4 * 4))
1499 ipath_dbg("2nd group: dmacopy: %llx %llx "
1500 "%llx %llx; shadow: %lx %lx %lx %lx\n",
1501 (unsigned long long)le64_to_cpu(dma[4]),
1502 (unsigned long long)le64_to_cpu(dma[5]),
1503 (unsigned long long)le64_to_cpu(dma[6]),
1504 (unsigned long long)le64_to_cpu(dma[7]),
1505 shadow[4], shadow[5], shadow[6],
1506 shadow[7]);
1507 }
1508}
1509
1510/*
1511 * common code for normal driver pio buffer allocation, and reserved
1512 * allocation.
1404 * 1513 *
1405 * do appropriate marking as busy, etc. 1514 * do appropriate marking as busy, etc.
1406 * returns buffer number if one found (>=0), negative number is error. 1515 * returns buffer number if one found (>=0), negative number is error.
1407 * Used by ipath_layer_send
1408 */ 1516 */
1409u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum) 1517static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
1518 u32 *pbufnum, u32 first, u32 last, u32 firsti)
1410{ 1519{
1411 int i, j, starti, updated = 0; 1520 int i, j, updated = 0;
1412 unsigned piobcnt, iter; 1521 unsigned piobcnt;
1413 unsigned long flags; 1522 unsigned long flags;
1414 unsigned long *shadow = dd->ipath_pioavailshadow; 1523 unsigned long *shadow = dd->ipath_pioavailshadow;
1415 u32 __iomem *buf; 1524 u32 __iomem *buf;
1416 1525
1417 piobcnt = (unsigned)(dd->ipath_piobcnt2k 1526 piobcnt = last - first;
1418 + dd->ipath_piobcnt4k);
1419 starti = dd->ipath_lastport_piobuf;
1420 iter = piobcnt - starti;
1421 if (dd->ipath_upd_pio_shadow) { 1527 if (dd->ipath_upd_pio_shadow) {
1422 /* 1528 /*
1423 * Minor optimization. If we had no buffers on last call, 1529 * Minor optimization. If we had no buffers on last call,
@@ -1425,12 +1531,10 @@ u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
1425 * if no buffers were updated, to be paranoid 1531 * if no buffers were updated, to be paranoid
1426 */ 1532 */
1427 ipath_update_pio_bufs(dd); 1533 ipath_update_pio_bufs(dd);
1428 /* we scanned here, don't do it at end of scan */ 1534 updated++;
1429 updated = 1; 1535 i = first;
1430 i = starti;
1431 } else 1536 } else
1432 i = dd->ipath_lastpioindex; 1537 i = firsti;
1433
1434rescan: 1538rescan:
1435 /* 1539 /*
1436 * while test_and_set_bit() is atomic, we do that and then the 1540 * while test_and_set_bit() is atomic, we do that and then the
@@ -1438,104 +1542,141 @@ rescan:
1438 * of the remaining armlaunch errors. 1542 * of the remaining armlaunch errors.
1439 */ 1543 */
1440 spin_lock_irqsave(&ipath_pioavail_lock, flags); 1544 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1441 for (j = 0; j < iter; j++, i++) { 1545 for (j = 0; j < piobcnt; j++, i++) {
1442 if (i >= piobcnt) 1546 if (i >= last)
1443 i = starti; 1547 i = first;
1444 /* 1548 if (__test_and_set_bit((2 * i) + 1, shadow))
1445 * To avoid bus lock overhead, we first find a candidate
1446 * buffer, then do the test and set, and continue if that
1447 * fails.
1448 */
1449 if (test_bit((2 * i) + 1, shadow) ||
1450 test_and_set_bit((2 * i) + 1, shadow))
1451 continue; 1549 continue;
1452 /* flip generation bit */ 1550 /* flip generation bit */
1453 change_bit(2 * i, shadow); 1551 __change_bit(2 * i, shadow);
1454 break; 1552 break;
1455 } 1553 }
1456 spin_unlock_irqrestore(&ipath_pioavail_lock, flags); 1554 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1457 1555
1458 if (j == iter) { 1556 if (j == piobcnt) {
1459 volatile __le64 *dma = dd->ipath_pioavailregs_dma;
1460
1461 /*
1462 * first time through; shadow exhausted, but may be real
1463 * buffers available, so go see; if any updated, rescan
1464 * (once)
1465 */
1466 if (!updated) { 1557 if (!updated) {
1558 /*
1559 * first time through; shadow exhausted, but may be
1560 * buffers available, try an update and then rescan.
1561 */
1467 ipath_update_pio_bufs(dd); 1562 ipath_update_pio_bufs(dd);
1468 updated = 1; 1563 updated++;
1469 i = starti; 1564 i = first;
1470 goto rescan; 1565 goto rescan;
1471 } 1566 } else if (updated == 1 && piobcnt <=
1472 dd->ipath_upd_pio_shadow = 1; 1567 ((dd->ipath_sendctrl
1473 /* 1568 >> INFINIPATH_S_UPDTHRESH_SHIFT) &
1474 * not atomic, but if we lose one once in a while, that's OK 1569 INFINIPATH_S_UPDTHRESH_MASK)) {
1475 */
1476 ipath_stats.sps_nopiobufs++;
1477 if (!(++dd->ipath_consec_nopiobuf % 100000)) {
1478 ipath_dbg(
1479 "%u pio sends with no bufavail; dmacopy: "
1480 "%llx %llx %llx %llx; shadow: "
1481 "%lx %lx %lx %lx\n",
1482 dd->ipath_consec_nopiobuf,
1483 (unsigned long long) le64_to_cpu(dma[0]),
1484 (unsigned long long) le64_to_cpu(dma[1]),
1485 (unsigned long long) le64_to_cpu(dma[2]),
1486 (unsigned long long) le64_to_cpu(dma[3]),
1487 shadow[0], shadow[1], shadow[2],
1488 shadow[3]);
1489 /* 1570 /*
1490 * 4 buffers per byte, 4 registers above, cover rest 1571 * for chips supporting and using the update
1491 * below 1572 * threshold we need to force an update of the
1573 * in-memory copy if the count is less than the
1574 * threshold, then check one more time.
1492 */ 1575 */
1493 if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 1576 ipath_force_pio_avail_update(dd);
1494 (sizeof(shadow[0]) * 4 * 4)) 1577 ipath_update_pio_bufs(dd);
1495 ipath_dbg("2nd group: dmacopy: %llx %llx " 1578 updated++;
1496 "%llx %llx; shadow: %lx %lx " 1579 i = first;
1497 "%lx %lx\n", 1580 goto rescan;
1498 (unsigned long long)
1499 le64_to_cpu(dma[4]),
1500 (unsigned long long)
1501 le64_to_cpu(dma[5]),
1502 (unsigned long long)
1503 le64_to_cpu(dma[6]),
1504 (unsigned long long)
1505 le64_to_cpu(dma[7]),
1506 shadow[4], shadow[5],
1507 shadow[6], shadow[7]);
1508 } 1581 }
1582
1583 no_pio_bufs(dd);
1509 buf = NULL; 1584 buf = NULL;
1510 goto bail; 1585 } else {
1586 if (i < dd->ipath_piobcnt2k)
1587 buf = (u32 __iomem *) (dd->ipath_pio2kbase +
1588 i * dd->ipath_palign);
1589 else
1590 buf = (u32 __iomem *)
1591 (dd->ipath_pio4kbase +
1592 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
1593 if (pbufnum)
1594 *pbufnum = i;
1511 } 1595 }
1512 1596
1513 /* 1597 return buf;
1514 * set next starting place. Since it's just an optimization, 1598}
1515 * it doesn't matter who wins on this, so no locking
1516 */
1517 dd->ipath_lastpioindex = i + 1;
1518 if (dd->ipath_upd_pio_shadow)
1519 dd->ipath_upd_pio_shadow = 0;
1520 if (dd->ipath_consec_nopiobuf)
1521 dd->ipath_consec_nopiobuf = 0;
1522 if (i < dd->ipath_piobcnt2k)
1523 buf = (u32 __iomem *) (dd->ipath_pio2kbase +
1524 i * dd->ipath_palign);
1525 else
1526 buf = (u32 __iomem *)
1527 (dd->ipath_pio4kbase +
1528 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
1529 ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
1530 i, (i < dd->ipath_piobcnt2k) ? 2 : 4, buf);
1531 if (pbufnum)
1532 *pbufnum = i;
1533 1599
1534bail: 1600/**
1601 * ipath_getpiobuf - find an available pio buffer
1602 * @dd: the infinipath device
1603 * @plen: the size of the PIO buffer needed in 32-bit words
1604 * @pbufnum: the buffer number is placed here
1605 */
1606u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
1607{
1608 u32 __iomem *buf;
1609 u32 pnum, nbufs;
1610 u32 first, lasti;
1611
1612 if (plen + 1 >= IPATH_SMALLBUF_DWORDS) {
1613 first = dd->ipath_piobcnt2k;
1614 lasti = dd->ipath_lastpioindexl;
1615 } else {
1616 first = 0;
1617 lasti = dd->ipath_lastpioindex;
1618 }
1619 nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
1620 buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);
1621
1622 if (buf) {
1623 /*
1624 * Set next starting place. It's just an optimization,
1625 * it doesn't matter who wins on this, so no locking
1626 */
1627 if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
1628 dd->ipath_lastpioindexl = pnum + 1;
1629 else
1630 dd->ipath_lastpioindex = pnum + 1;
1631 if (dd->ipath_upd_pio_shadow)
1632 dd->ipath_upd_pio_shadow = 0;
1633 if (dd->ipath_consec_nopiobuf)
1634 dd->ipath_consec_nopiobuf = 0;
1635 ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
1636 pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
1637 if (pbufnum)
1638 *pbufnum = pnum;
1639
1640 }
1535 return buf; 1641 return buf;
1536} 1642}
1537 1643
1538/** 1644/**
1645 * ipath_chg_pioavailkernel - change which send buffers are available for kernel
1646 * @dd: the infinipath device
1647 * @start: the starting send buffer number
1648 * @len: the number of send buffers
1649 * @avail: true if the buffers are available for kernel use, false otherwise
1650 */
1651void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
1652 unsigned len, int avail)
1653{
1654 unsigned long flags;
1655 unsigned end;
1656
1657 /* There are two bits per send buffer (busy and generation) */
1658 start *= 2;
1659 len *= 2;
1660 end = start + len;
1661
1662 /* Update the busy bit in the shadow and the kernel-ownership bit. */
1663 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1664 while (start < end) {
1665 if (avail) {
1666 __clear_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
1667 dd->ipath_pioavailshadow);
1668 __set_bit(start, dd->ipath_pioavailkernel);
1669 } else {
1670 __set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
1671 dd->ipath_pioavailshadow);
1672 __clear_bit(start, dd->ipath_pioavailkernel);
1673 }
1674 start += 2;
1675 }
1676 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1677}
1678
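Annotation: ipath_chg_pioavailkernel() depends on the shadow layout used throughout this file: two bits per send buffer, generation at bit 2*i and busy at bit 2*i+1 (so INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT is presumably 1). A tiny single-word user-space model of claiming buffer i under that layout; the real code works on an array of unsigned long under a spinlock:

	#include <stdint.h>

	#define BUSY_SHIFT 1	/* assumed: busy bit sits one above the generation bit */

	/* sketch: claim buffer i in a 2-bits-per-buffer shadow word */
	static int claim_buf(uint64_t *shadow, unsigned i)
	{
		uint64_t busy = 1ULL << (2 * i + BUSY_SHIFT);

		if (*shadow & busy)
			return 0;		/* already in use */
		*shadow |= busy;		/* mark busy */
		*shadow ^= 1ULL << (2 * i);	/* flip the generation bit */
		return 1;
	}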
1679/**
1539 * ipath_create_rcvhdrq - create a receive header queue 1680 * ipath_create_rcvhdrq - create a receive header queue
1540 * @dd: the infinipath device 1681 * @dd: the infinipath device
1541 * @pd: the port data 1682 * @pd: the port data
@@ -1566,19 +1707,27 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
1566 ret = -ENOMEM; 1707 ret = -ENOMEM;
1567 goto bail; 1708 goto bail;
1568 } 1709 }
1569 pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent( 1710
1570 &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, GFP_KERNEL); 1711 if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
1571 if (!pd->port_rcvhdrtail_kvaddr) { 1712 pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
1572 ipath_dev_err(dd, "attempt to allocate 1 page " 1713 &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
1573 "for port %u rcvhdrqtailaddr failed\n", 1714 GFP_KERNEL);
1574 pd->port_port); 1715 if (!pd->port_rcvhdrtail_kvaddr) {
1575 ret = -ENOMEM; 1716 ipath_dev_err(dd, "attempt to allocate 1 page "
1576 dma_free_coherent(&dd->pcidev->dev, amt, 1717 "for port %u rcvhdrqtailaddr "
1577 pd->port_rcvhdrq, pd->port_rcvhdrq_phys); 1718 "failed\n", pd->port_port);
1578 pd->port_rcvhdrq = NULL; 1719 ret = -ENOMEM;
1579 goto bail; 1720 dma_free_coherent(&dd->pcidev->dev, amt,
1721 pd->port_rcvhdrq,
1722 pd->port_rcvhdrq_phys);
1723 pd->port_rcvhdrq = NULL;
1724 goto bail;
1725 }
1726 pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
1727 ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx "
1728 "physical\n", pd->port_port,
1729 (unsigned long long) phys_hdrqtail);
1580 } 1730 }
1581 pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
1582 1731
1583 pd->port_rcvhdrq_size = amt; 1732 pd->port_rcvhdrq_size = amt;
1584 1733
@@ -1588,10 +1737,6 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
1588 (unsigned long) pd->port_rcvhdrq_phys, 1737 (unsigned long) pd->port_rcvhdrq_phys,
1589 (unsigned long) pd->port_rcvhdrq_size, 1738 (unsigned long) pd->port_rcvhdrq_size,
1590 pd->port_port); 1739 pd->port_port);
1591
1592 ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx physical\n",
1593 pd->port_port,
1594 (unsigned long long) phys_hdrqtail);
1595 } 1740 }
1596 else 1741 else
1597 ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; " 1742 ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
@@ -1615,7 +1760,6 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
1615 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr, 1760 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
1616 pd->port_port, pd->port_rcvhdrq_phys); 1761 pd->port_port, pd->port_rcvhdrq_phys);
1617 1762
1618 ret = 0;
1619bail: 1763bail:
1620 return ret; 1764 return ret;
1621} 1765}
@@ -1632,52 +1776,149 @@ bail:
1632 */ 1776 */
1633void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl) 1777void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
1634{ 1778{
1779 unsigned long flags;
1780
1781 if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
1782 ipath_cdbg(VERBOSE, "Ignore while in autonegotiation\n");
1783 goto bail;
1784 }
1785 /*
1786 * If we have SDMA, and it's not disabled, we have to kick off the
1787 * abort state machine, provided we aren't already aborting.
1788 * If we are in the process of aborting SDMA (!DISABLED, but ABORTING),
1789 * we skip the rest of this routine. It is already "in progress"
1790 */
1791 if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
1792 int skip_cancel;
1793 u64 *statp = &dd->ipath_sdma_status;
1794
1795 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
1796 skip_cancel =
1797 !test_bit(IPATH_SDMA_DISABLED, statp) &&
1798 test_and_set_bit(IPATH_SDMA_ABORTING, statp);
1799 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
1800 if (skip_cancel)
1801 goto bail;
1802 }
1803
1635 ipath_dbg("Cancelling all in-progress send buffers\n"); 1804 ipath_dbg("Cancelling all in-progress send buffers\n");
1636 dd->ipath_lastcancel = jiffies+HZ/2; /* skip armlaunch errs a bit */ 1805
1806 /* skip armlaunch errs for a while */
1807 dd->ipath_lastcancel = jiffies + HZ / 2;
1808
1637 /* 1809 /*
1638 * the abort bit is auto-clearing. We read scratch to be sure 1810 * The abort bit is auto-clearing. We also don't want pioavail
1639 * that cancels and the abort have taken effect in the chip. 1811 * update happening during this, and we don't want any other
1812 * sends going out, so turn those off for the duration. We read
1813 * the scratch register to be sure that cancels and the abort
1814 * have taken effect in the chip. Otherwise two parts are same
1815 * as ipath_force_pio_avail_update()
1640 */ 1816 */
1817 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1818 dd->ipath_sendctrl &= ~(INFINIPATH_S_PIOBUFAVAILUPD
1819 | INFINIPATH_S_PIOENABLE);
1641 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 1820 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1642 INFINIPATH_S_ABORT); 1821 dd->ipath_sendctrl | INFINIPATH_S_ABORT);
1643 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 1822 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1823 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1824
1825 /* disarm all send buffers */
1644 ipath_disarm_piobufs(dd, 0, 1826 ipath_disarm_piobufs(dd, 0,
1645 (unsigned)(dd->ipath_piobcnt2k + dd->ipath_piobcnt4k)); 1827 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
1646 if (restore_sendctrl) /* else done by caller later */ 1828
1829 if (restore_sendctrl) {
1830 /* else done by caller later if needed */
1831 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1832 dd->ipath_sendctrl |= INFINIPATH_S_PIOBUFAVAILUPD |
1833 INFINIPATH_S_PIOENABLE;
1647 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 1834 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1648 dd->ipath_sendctrl); 1835 dd->ipath_sendctrl);
1836 /* and again, be sure all have hit the chip */
1837 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1838 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1839 }
1649 1840
1650 /* and again, be sure all have hit the chip */ 1841 if ((dd->ipath_flags & IPATH_HAS_SEND_DMA) &&
1651 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 1842 !test_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status) &&
1843 test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)) {
1844 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
1845 /* only wait so long for intr */
1846 dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
1847 dd->ipath_sdma_reset_wait = 200;
1848 __set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
1849 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
1850 tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
1851 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
1852 }
1853bail:;
1652} 1854}
1653 1855
1856/*
1857 * Force an update of in-memory copy of the pioavail registers, when
1858 * needed for any of a variety of reasons. We read the scratch register
1859 * to make it highly likely that the update will have happened by the
1860 * time we return. If already off (as in cancel_sends above), this
1861 * routine is a nop, on the assumption that the caller will "do the
1862 * right thing".
1863 */
1864void ipath_force_pio_avail_update(struct ipath_devdata *dd)
1865{
1866 unsigned long flags;
1867
1868 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1869 if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
1870 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1871 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
1872 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1873 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1874 dd->ipath_sendctrl);
1875 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1876 }
1877 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1878}
1654 1879
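Annotation: ipath_force_pio_avail_update() is built on a standard device-register idiom: each control write is followed by a read of a harmless register so the posted write is known to have reached the chip before the next step. Reduced to generic kernel I/O calls, a sketch:

	#include <linux/io.h>

	/* sketch: pulse a control bit off and back on, flushing each write with a read */
	static void example_pulse_bit(void __iomem *ctrl, void __iomem *scratch,
				      u32 shadow, u32 bit)
	{
		writel(shadow & ~bit, ctrl);
		(void) readl(scratch);		/* force the posted write out */
		writel(shadow, ctrl);
		(void) readl(scratch);
	}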
1655static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) 1880static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
1881 int linitcmd)
1656{ 1882{
1883 u64 mod_wd;
1657 static const char *what[4] = { 1884 static const char *what[4] = {
1658 [0] = "NOP", 1885 [0] = "NOP",
1659 [INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN", 1886 [INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN",
1660 [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED", 1887 [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
1661 [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE" 1888 [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
1662 }; 1889 };
1663 int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) & 1890
1664 INFINIPATH_IBCC_LINKCMD_MASK; 1891 if (linitcmd == INFINIPATH_IBCC_LINKINITCMD_DISABLE) {
1665 1892 /*
1666 ipath_cdbg(VERBOSE, "Trying to move unit %u to %s, current ltstate " 1893 * If we are told to disable, note that so link-recovery
1667 "is %s\n", dd->ipath_unit, 1894 * code does not attempt to bring us back up.
1668 what[linkcmd], 1895 */
1669 ipath_ibcstatus_str[ 1896 preempt_disable();
1670 (ipath_read_kreg64 1897 dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
1671 (dd, dd->ipath_kregs->kr_ibcstatus) >> 1898 preempt_enable();
1672 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) & 1899 } else if (linitcmd) {
1673 INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]); 1900 /*
1674 /* flush all queued sends when going to DOWN to be sure that 1901 * Any other linkinitcmd will lead to LINKDOWN and then
1675 * they don't block MAD packets */ 1902 * to INIT (if all is well), so clear flag to let
1676 if (linkcmd == INFINIPATH_IBCC_LINKCMD_DOWN) 1903 * link-recovery code attempt to bring us back up.
1677 ipath_cancel_sends(dd, 1); 1904 */
1905 preempt_disable();
1906 dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
1907 preempt_enable();
1908 }
1909
1910 mod_wd = (linkcmd << dd->ibcc_lc_shift) |
1911 (linitcmd << INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1912 ipath_cdbg(VERBOSE,
1913 "Moving unit %u to %s (initcmd=0x%x), current ltstate is %s\n",
1914 dd->ipath_unit, what[linkcmd], linitcmd,
1915 ipath_ibcstatus_str[ipath_ib_linktrstate(dd,
1916 ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus))]);
1678 1917
1679 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, 1918 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1680 dd->ipath_ibcctrl | which); 1919 dd->ipath_ibcctrl | mod_wd);
1920 /* read from chip so write is flushed */
1921 (void) ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
1681} 1922}
1682 1923
1683int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate) 1924int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
@@ -1687,30 +1928,28 @@ int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
1687 1928
1688 switch (newstate) { 1929 switch (newstate) {
1689 case IPATH_IB_LINKDOWN_ONLY: 1930 case IPATH_IB_LINKDOWN_ONLY:
1690 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN << 1931 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN, 0);
1691 INFINIPATH_IBCC_LINKCMD_SHIFT);
1692 /* don't wait */ 1932 /* don't wait */
1693 ret = 0; 1933 ret = 0;
1694 goto bail; 1934 goto bail;
1695 1935
1696 case IPATH_IB_LINKDOWN: 1936 case IPATH_IB_LINKDOWN:
1697 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL << 1937 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
1698 INFINIPATH_IBCC_LINKINITCMD_SHIFT); 1938 INFINIPATH_IBCC_LINKINITCMD_POLL);
1699 /* don't wait */ 1939 /* don't wait */
1700 ret = 0; 1940 ret = 0;
1701 goto bail; 1941 goto bail;
1702 1942
1703 case IPATH_IB_LINKDOWN_SLEEP: 1943 case IPATH_IB_LINKDOWN_SLEEP:
1704 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP << 1944 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
1705 INFINIPATH_IBCC_LINKINITCMD_SHIFT); 1945 INFINIPATH_IBCC_LINKINITCMD_SLEEP);
1706 /* don't wait */ 1946 /* don't wait */
1707 ret = 0; 1947 ret = 0;
1708 goto bail; 1948 goto bail;
1709 1949
1710 case IPATH_IB_LINKDOWN_DISABLE: 1950 case IPATH_IB_LINKDOWN_DISABLE:
1711 ipath_set_ib_lstate(dd, 1951 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
1712 INFINIPATH_IBCC_LINKINITCMD_DISABLE << 1952 INFINIPATH_IBCC_LINKINITCMD_DISABLE);
1713 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1714 /* don't wait */ 1953 /* don't wait */
1715 ret = 0; 1954 ret = 0;
1716 goto bail; 1955 goto bail;
@@ -1725,8 +1964,8 @@ int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
1725 ret = -EINVAL; 1964 ret = -EINVAL;
1726 goto bail; 1965 goto bail;
1727 } 1966 }
1728 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED << 1967 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED, 0);
1729 INFINIPATH_IBCC_LINKCMD_SHIFT); 1968
1730 /* 1969 /*
1731 * Since the port can transition to ACTIVE by receiving 1970 * Since the port can transition to ACTIVE by receiving
1732 * a non VL 15 packet, wait for either state. 1971 * a non VL 15 packet, wait for either state.
@@ -1743,8 +1982,7 @@ int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
1743 ret = -EINVAL; 1982 ret = -EINVAL;
1744 goto bail; 1983 goto bail;
1745 } 1984 }
1746 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE << 1985 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE, 0);
1747 INFINIPATH_IBCC_LINKCMD_SHIFT);
1748 lstate = IPATH_LINKACTIVE; 1986 lstate = IPATH_LINKACTIVE;
1749 break; 1987 break;
1750 1988
@@ -1753,16 +1991,41 @@ int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
1753 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK; 1991 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
1754 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, 1992 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1755 dd->ipath_ibcctrl); 1993 dd->ipath_ibcctrl);
1994
1995 /* turn heartbeat off, as it causes loopback to fail */
1996 dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
1997 IPATH_IB_HRTBT_OFF);
1998 /* don't wait */
1756 ret = 0; 1999 ret = 0;
1757 goto bail; // no state change to wait for 2000 goto bail;
1758 2001
1759 case IPATH_IB_LINK_EXTERNAL: 2002 case IPATH_IB_LINK_EXTERNAL:
1760 dev_info(&dd->pcidev->dev, "Disabling IB local loopback (normal)\n"); 2003 dev_info(&dd->pcidev->dev,
2004 "Disabling IB local loopback (normal)\n");
2005 dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2006 IPATH_IB_HRTBT_ON);
1761 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK; 2007 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
1762 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, 2008 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1763 dd->ipath_ibcctrl); 2009 dd->ipath_ibcctrl);
2010 /* don't wait */
1764 ret = 0; 2011 ret = 0;
1765 goto bail; // no state change to wait for 2012 goto bail;
2013
2014 /*
2015 * Heartbeat can be explicitly enabled by the user via
2016 * "hrtbt_enable" "file", and if disabled, trying to enable here
2017 * will have no effect. Implicit changes (heartbeat off when
2018 * loopback on, and vice versa) are included to ease testing.
2019 */
2020 case IPATH_IB_LINK_HRTBT:
2021 ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2022 IPATH_IB_HRTBT_ON);
2023 goto bail;
2024
2025 case IPATH_IB_LINK_NO_HRTBT:
2026 ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2027 IPATH_IB_HRTBT_OFF);
2028 goto bail;
1766 2029
1767 default: 2030 default:
1768 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate); 2031 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
@@ -1785,7 +2048,7 @@ bail:
1785 * sanity checking on this, and we don't deal with what happens to 2048 * sanity checking on this, and we don't deal with what happens to
1786 * programs that are already running when the size changes. 2049 * programs that are already running when the size changes.
1787 * NOTE: changing the MTU will usually cause the IBC to go back to 2050 * NOTE: changing the MTU will usually cause the IBC to go back to
1788 * link initialize (IPATH_IBSTATE_INIT) state... 2051 * link INIT state...
1789 */ 2052 */
1790int ipath_set_mtu(struct ipath_devdata *dd, u16 arg) 2053int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
1791{ 2054{
@@ -1800,7 +2063,7 @@ int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
1800 * piosize). We check that it's one of the valid IB sizes. 2063 * piosize). We check that it's one of the valid IB sizes.
1801 */ 2064 */
1802 if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 && 2065 if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
1803 arg != 4096) { 2066 (arg != 4096 || !ipath_mtu4096)) {
1804 ipath_dbg("Trying to set invalid mtu %u, failing\n", arg); 2067 ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
1805 ret = -EINVAL; 2068 ret = -EINVAL;
1806 goto bail; 2069 goto bail;
@@ -1816,6 +2079,8 @@ int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
1816 if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) { 2079 if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
1817 /* Only if it's not the initial value (or reset to it) */ 2080 /* Only if it's not the initial value (or reset to it) */
1818 if (piosize != dd->ipath_init_ibmaxlen) { 2081 if (piosize != dd->ipath_init_ibmaxlen) {
2082 if (arg > piosize && arg <= dd->ipath_init_ibmaxlen)
2083 piosize = dd->ipath_init_ibmaxlen;
1819 dd->ipath_ibmaxlen = piosize; 2084 dd->ipath_ibmaxlen = piosize;
1820 changed = 1; 2085 changed = 1;
1821 } 2086 }
@@ -1829,24 +2094,17 @@ int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
1829 } 2094 }
1830 2095
1831 if (changed) { 2096 if (changed) {
2097 u64 ibc = dd->ipath_ibcctrl, ibdw;
1832 /* 2098 /*
1833 * set the IBC maxpktlength to the size of our pio 2099 * update our housekeeping variables, and set IBC max
1834 * buffers in words 2100 * size, same as init code; max IBC is max we allow in
2101 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
1835 */ 2102 */
1836 u64 ibc = dd->ipath_ibcctrl; 2103 dd->ipath_ibmaxlen = piosize - 2 * sizeof(u32);
2104 ibdw = (dd->ipath_ibmaxlen >> 2) + 1;
1837 ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK << 2105 ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
1838 INFINIPATH_IBCC_MAXPKTLEN_SHIFT); 2106 dd->ibcc_mpl_shift);
1839 2107 ibc |= ibdw << dd->ibcc_mpl_shift;
1840 piosize = piosize - 2 * sizeof(u32); /* ignore pbc */
1841 dd->ipath_ibmaxlen = piosize;
1842 piosize /= sizeof(u32); /* in words */
1843 /*
1844 * for ICRC, which we only send in diag test pkt mode, and
1845 * we don't need to worry about that for mtu
1846 */
1847 piosize += 1;
1848
1849 ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
1850 dd->ipath_ibcctrl = ibc; 2108 dd->ipath_ibcctrl = ibc;
1851 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, 2109 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1852 dd->ipath_ibcctrl); 2110 dd->ipath_ibcctrl);
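Annotation: the rewritten MTU block computes the IBC max-packet-length field as the buffer size minus the two 32-bit words of PBC, converted to dwords, plus one dword for the ICRC. A standalone check of that arithmetic, assuming a hypothetical 2048-byte PIO buffer:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t piosize = 2048;			/* hypothetical 2 KB buffer */
		uint32_t ibmaxlen = piosize - 2 * sizeof(uint32_t); /* drop qword PBC: 2040 */
		uint32_t ibdw = (ibmaxlen >> 2) + 1;		/* dwords, +1 for ICRC: 511 */

		printf("ibmaxlen=%u bytes, maxpktlen field=%u dwords\n", ibmaxlen, ibdw);
		return 0;
	}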
@@ -1859,11 +2117,16 @@ bail:
1859 return ret; 2117 return ret;
1860} 2118}
1861 2119
1862int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc) 2120int ipath_set_lid(struct ipath_devdata *dd, u32 lid, u8 lmc)
1863{ 2121{
1864 dd->ipath_lid = arg; 2122 dd->ipath_lid = lid;
1865 dd->ipath_lmc = lmc; 2123 dd->ipath_lmc = lmc;
1866 2124
2125 dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LIDLMC, lid |
2126 (~((1U << lmc) - 1)) << 16);
2127
2128 dev_info(&dd->pcidev->dev, "We got a lid: 0x%x\n", lid);
2129
1867 return 0; 2130 return 0;
1868} 2131}
1869 2132
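Annotation: the new IPATH_IB_CFG_LIDLMC word packs the LID into bits 15:0 and an LMC-derived mask into bits 31:16; an LMC of n makes the low n bits of the LID "don't care" when matching. A standalone check of the packing, with hypothetical values:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t lid = 0x10, lmc = 2;	/* hypothetical values */
		uint32_t word = lid | (~((1U << lmc) - 1)) << 16;

		/* lmc=2 -> mask 0xfffc: low two LID bits ignored on match */
		printf("packed=0x%08x mask=0x%04x\n", word, (word >> 16) & 0xffff);
		return 0;
	}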
@@ -1925,10 +2188,8 @@ static void ipath_run_led_override(unsigned long opaque)
1925 * but leave that to per-chip functions. 2188 * but leave that to per-chip functions.
1926 */ 2189 */
1927 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus); 2190 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
1928 ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) & 2191 ltstate = ipath_ib_linktrstate(dd, val);
1929 INFINIPATH_IBCS_LINKTRAININGSTATE_MASK; 2192 lstate = ipath_ib_linkstate(dd, val);
1930 lstate = (val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) &
1931 INFINIPATH_IBCS_LINKSTATE_MASK;
1932 2193
1933 dd->ipath_f_setextled(dd, lstate, ltstate); 2194 dd->ipath_f_setextled(dd, lstate, ltstate);
1934 mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff); 2195 mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
@@ -1969,9 +2230,8 @@ void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
1969 dd->ipath_led_override_timer.data = (unsigned long) dd; 2230 dd->ipath_led_override_timer.data = (unsigned long) dd;
1970 dd->ipath_led_override_timer.expires = jiffies + 1; 2231 dd->ipath_led_override_timer.expires = jiffies + 1;
1971 add_timer(&dd->ipath_led_override_timer); 2232 add_timer(&dd->ipath_led_override_timer);
1972 } else { 2233 } else
1973 atomic_dec(&dd->ipath_led_override_timer_active); 2234 atomic_dec(&dd->ipath_led_override_timer_active);
1974 }
1975} 2235}
1976 2236
1977/** 2237/**
@@ -1989,6 +2249,8 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
1989 2249
1990 ipath_dbg("Shutting down the device\n"); 2250 ipath_dbg("Shutting down the device\n");
1991 2251
2252 ipath_hol_up(dd); /* make sure user processes aren't suspended */
2253
1992 dd->ipath_flags |= IPATH_LINKUNK; 2254 dd->ipath_flags |= IPATH_LINKUNK;
1993 dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN | 2255 dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
1994 IPATH_LINKINIT | IPATH_LINKARMED | 2256 IPATH_LINKINIT | IPATH_LINKARMED |
@@ -2003,6 +2265,9 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
2003 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 2265 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
2004 dd->ipath_rcvctrl); 2266 dd->ipath_rcvctrl);
2005 2267
2268 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
2269 teardown_sdma(dd);
2270
2006 /* 2271 /*
2007 * gracefully stop all sends allowing any in progress to trickle out 2272 * gracefully stop all sends allowing any in progress to trickle out
2008 * first. 2273 * first.
@@ -2020,10 +2285,16 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
2020 */ 2285 */
2021 udelay(5); 2286 udelay(5);
2022 2287
2023 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE << 2288 dd->ipath_f_setextled(dd, 0, 0); /* make sure LEDs are off */
2024 INFINIPATH_IBCC_LINKINITCMD_SHIFT); 2289
2290 ipath_set_ib_lstate(dd, 0, INFINIPATH_IBCC_LINKINITCMD_DISABLE);
2025 ipath_cancel_sends(dd, 0); 2291 ipath_cancel_sends(dd, 0);
2026 2292
2293 /*
2294 * we are shutting down, so tell components that care. We don't
2295 * do this on just a link state change; as with ethernet, a cable
2296 * unplug, etc. doesn't change driver state
2297 */
2027 signal_ib_event(dd, IB_EVENT_PORT_ERR); 2298 signal_ib_event(dd, IB_EVENT_PORT_ERR);
2028 2299
2029 /* disable IBC */ 2300 /* disable IBC */
@@ -2038,10 +2309,20 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
2038 */ 2309 */
2039 dd->ipath_f_quiet_serdes(dd); 2310 dd->ipath_f_quiet_serdes(dd);
2040 2311
2312 /* stop all the timers that might still be running */
2313 del_timer_sync(&dd->ipath_hol_timer);
2041 if (dd->ipath_stats_timer_active) { 2314 if (dd->ipath_stats_timer_active) {
2042 del_timer_sync(&dd->ipath_stats_timer); 2315 del_timer_sync(&dd->ipath_stats_timer);
2043 dd->ipath_stats_timer_active = 0; 2316 dd->ipath_stats_timer_active = 0;
2044 } 2317 }
2318 if (dd->ipath_intrchk_timer.data) {
2319 del_timer_sync(&dd->ipath_intrchk_timer);
2320 dd->ipath_intrchk_timer.data = 0;
2321 }
2322 if (atomic_read(&dd->ipath_led_override_timer_active)) {
2323 del_timer_sync(&dd->ipath_led_override_timer);
2324 atomic_set(&dd->ipath_led_override_timer_active, 0);
2325 }
2045 2326
2046 /* 2327 /*
2047 * clear all interrupts and errors, so that the next time the driver 2328 * clear all interrupts and errors, so that the next time the driver
@@ -2115,13 +2396,13 @@ void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
2115 ipath_cdbg(VERBOSE, "free closed port %d " 2396 ipath_cdbg(VERBOSE, "free closed port %d "
2116 "ipath_port0_skbinfo @ %p\n", pd->port_port, 2397 "ipath_port0_skbinfo @ %p\n", pd->port_port,
2117 skbinfo); 2398 skbinfo);
2118 for (e = 0; e < dd->ipath_rcvegrcnt; e++) 2399 for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
2119 if (skbinfo[e].skb) { 2400 if (skbinfo[e].skb) {
2120 pci_unmap_single(dd->pcidev, skbinfo[e].phys, 2401 pci_unmap_single(dd->pcidev, skbinfo[e].phys,
2121 dd->ipath_ibmaxlen, 2402 dd->ipath_ibmaxlen,
2122 PCI_DMA_FROMDEVICE); 2403 PCI_DMA_FROMDEVICE);
2123 dev_kfree_skb(skbinfo[e].skb); 2404 dev_kfree_skb(skbinfo[e].skb);
2124 } 2405 }
2125 vfree(skbinfo); 2406 vfree(skbinfo);
2126 } 2407 }
2127 kfree(pd->port_tid_pg_list); 2408 kfree(pd->port_tid_pg_list);
@@ -2144,6 +2425,7 @@ static int __init infinipath_init(void)
2144 */ 2425 */
2145 idr_init(&unit_table); 2426 idr_init(&unit_table);
2146 if (!idr_pre_get(&unit_table, GFP_KERNEL)) { 2427 if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
2428 printk(KERN_ERR IPATH_DRV_NAME ": idr_pre_get() failed\n");
2147 ret = -ENOMEM; 2429 ret = -ENOMEM;
2148 goto bail; 2430 goto bail;
2149 } 2431 }
@@ -2235,13 +2517,18 @@ int ipath_reset_device(int unit)
2235 } 2517 }
2236 } 2518 }
2237 2519
2520 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
2521 teardown_sdma(dd);
2522
2238 dd->ipath_flags &= ~IPATH_INITTED; 2523 dd->ipath_flags &= ~IPATH_INITTED;
2524 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
2239 ret = dd->ipath_f_reset(dd); 2525 ret = dd->ipath_f_reset(dd);
2240 if (ret != 1) 2526 if (ret == 1) {
2241 ipath_dbg("reset was not successful\n"); 2527 ipath_dbg("Reinitializing unit %u after reset attempt\n",
2242 ipath_dbg("Trying to reinitialize unit %u after reset attempt\n", 2528 unit);
2243 unit); 2529 ret = ipath_init_chip(dd, 1);
2244 ret = ipath_init_chip(dd, 1); 2530 } else
2531 ret = -EAGAIN;
2245 if (ret) 2532 if (ret)
2246 ipath_dev_err(dd, "Reinitialize unit %u after " 2533 ipath_dev_err(dd, "Reinitialize unit %u after "
2247 "reset failed with %d\n", unit, ret); 2534 "reset failed with %d\n", unit, ret);
@@ -2253,13 +2540,121 @@ bail:
2253 return ret; 2540 return ret;
2254} 2541}
2255 2542
2543/*
2544 * send a signal to all the processes that have the driver open
2545 * through the normal interfaces (i.e., everything other than the
2546 * diags interface). Returns the number of signalled processes.
2547 */
2548static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
2549{
2550 int i, sub, any = 0;
2551 pid_t pid;
2552
2553 if (!dd->ipath_pd)
2554 return 0;
2555 for (i = 1; i < dd->ipath_cfgports; i++) {
2556 if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt ||
2557 !dd->ipath_pd[i]->port_pid)
2558 continue;
2559 pid = dd->ipath_pd[i]->port_pid;
2560 dev_info(&dd->pcidev->dev, "context %d in use "
2561 "(PID %u), sending signal %d\n",
2562 i, pid, sig);
2563 kill_proc(pid, sig, 1);
2564 any++;
2565 for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
2566 pid = dd->ipath_pd[i]->port_subpid[sub];
2567 if (!pid)
2568 continue;
2569 dev_info(&dd->pcidev->dev, "sub-context "
2570 "%d:%d in use (PID %u), sending "
2571 "signal %d\n", i, sub, pid, sig);
2572 kill_proc(pid, sig, 1);
2573 any++;
2574 }
2575 }
2576 return any;
2577}
2578
2579static void ipath_hol_signal_down(struct ipath_devdata *dd)
2580{
2581 if (ipath_signal_procs(dd, SIGSTOP))
2582 ipath_dbg("Stopped some processes\n");
2583 ipath_cancel_sends(dd, 1);
2584}
2585
2586
2587static void ipath_hol_signal_up(struct ipath_devdata *dd)
2588{
2589 if (ipath_signal_procs(dd, SIGCONT))
2590 ipath_dbg("Continued some processes\n");
2591}
2592
2593/*
2594 * link is down, stop any user processes, and flush pending sends
2595 * to prevent HoL blocking, then start the HoL timer that
2596 * periodically continues and then stops the processes, so they
2597 * can detect link down if they want, and do something about it.
2598 * Timer may already be running, so use __mod_timer, not add_timer.
2599 */
2600void ipath_hol_down(struct ipath_devdata *dd)
2601{
2602 dd->ipath_hol_state = IPATH_HOL_DOWN;
2603 ipath_hol_signal_down(dd);
2604 dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
2605 dd->ipath_hol_timer.expires = jiffies +
2606 msecs_to_jiffies(ipath_hol_timeout_ms);
2607 __mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
2608}
2609
2610/*
2611 * link is up, continue any user processes, and leave the
2612 * timer running, if set; it becomes a nop when it sees
2613 * that the link is up
2614 */
2615void ipath_hol_up(struct ipath_devdata *dd)
2616{
2617 ipath_hol_signal_up(dd);
2618 dd->ipath_hol_state = IPATH_HOL_UP;
2619}
2620
2621/*
2622 * toggle the running/not-running state of user processes
2623 * to prevent HoL blocking on chip resources, but still allow
2624 * user processes to do link down special case handling.
2625 * Should only be called via the timer
2626 */
2627void ipath_hol_event(unsigned long opaque)
2628{
2629 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
2630
2631 if (dd->ipath_hol_next == IPATH_HOL_DOWNSTOP
2632 && dd->ipath_hol_state != IPATH_HOL_UP) {
2633 dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
2634 ipath_dbg("Stopping processes\n");
2635 ipath_hol_signal_down(dd);
2636 } else { /* may do "extra" if also in ipath_hol_up() */
2637 dd->ipath_hol_next = IPATH_HOL_DOWNSTOP;
2638 ipath_dbg("Continuing processes\n");
2639 ipath_hol_signal_up(dd);
2640 }
2641 if (dd->ipath_hol_state == IPATH_HOL_UP)
2642 ipath_dbg("link's up, don't resched timer\n");
2643 else {
2644 dd->ipath_hol_timer.expires = jiffies +
2645 msecs_to_jiffies(ipath_hol_timeout_ms);
2646 __mod_timer(&dd->ipath_hol_timer,
2647 dd->ipath_hol_timer.expires);
2648 }
2649}
2650
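Annotation: ipath_hol_event() flips between stopping and continuing user processes on each timer tick until the link recovers. The skeleton of such a self-rearming toggle, written against this kernel generation's timer API (callback takes an opaque unsigned long), with hypothetical stop/continue helpers:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static void do_stop(void) { }	/* hypothetical: SIGSTOP the users */
	static void do_cont(void) { }	/* hypothetical: SIGCONT the users */

	struct toggler {
		struct timer_list timer;
		int next_is_stop;
		int done;		/* set when the link is back up */
	};

	static void example_toggle_event(unsigned long opaque)
	{
		struct toggler *t = (struct toggler *)opaque;

		if (t->next_is_stop)
			do_stop();
		else
			do_cont();
		t->next_is_stop = !t->next_is_stop;

		if (!t->done)	/* rearm; a fired timer may be mod_timer'ed again */
			mod_timer(&t->timer, jiffies + msecs_to_jiffies(1000));
	}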
2256int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv) 2651int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
2257{ 2652{
2258 u64 val; 2653 u64 val;
2259 if ( new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK ) { 2654
2655 if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
2260 return -1; 2656 return -1;
2261 } 2657 if (dd->ipath_rx_pol_inv != new_pol_inv) {
2262 if ( dd->ipath_rx_pol_inv != new_pol_inv ) {
2263 dd->ipath_rx_pol_inv = new_pol_inv; 2658 dd->ipath_rx_pol_inv = new_pol_inv;
2264 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); 2659 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
2265 val &= ~(INFINIPATH_XGXS_RX_POL_MASK << 2660 val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index e28a42f53769..dc37277f1c80 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -62,6 +62,33 @@
62 * accessing eeprom contents from within the kernel, only via sysfs. 62 * accessing eeprom contents from within the kernel, only via sysfs.
63 */ 63 */
64 64
65/* Added functionality for IBA7220-based cards */
66#define IPATH_EEPROM_DEV_V1 0xA0
67#define IPATH_EEPROM_DEV_V2 0xA2
68#define IPATH_TEMP_DEV 0x98
69#define IPATH_BAD_DEV (IPATH_EEPROM_DEV_V2+2)
70#define IPATH_NO_DEV (0xFF)
71
72/*
73 * The number of I2C chains is proliferating. Table below brings
74 * some order to the madness. The basic principle is that the
75 * table is scanned from the top, and a "probe" is made to the
76 * device probe_dev. If that succeeds, the chain is considered
77 * to be of that type, and dd->ipath_i2c_chain_type is set to the index+1
78 * of the entry.
79 * The +1 is so static initialization can mean "unknown, do probe."
80 */
81static struct i2c_chain_desc {
82 u8 probe_dev; /* If seen at probe, chain is this type */
83 u8 eeprom_dev; /* Dev addr (if any) for EEPROM */
84 u8 temp_dev; /* Dev Addr (if any) for Temp-sense */
85} i2c_chains[] = {
86 { IPATH_BAD_DEV, IPATH_NO_DEV, IPATH_NO_DEV }, /* pre-iba7220 bds */
87 { IPATH_EEPROM_DEV_V1, IPATH_EEPROM_DEV_V1, IPATH_TEMP_DEV}, /* V1 */
88 { IPATH_EEPROM_DEV_V2, IPATH_EEPROM_DEV_V2, IPATH_TEMP_DEV}, /* V2 */
89 { IPATH_NO_DEV }
90};
91
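The scan-and-memoize idiom behind this table, as a freestanding sketch; chain_desc, probe() and the 0xFF sentinel are illustrative stand-ins rather than driver symbols:

struct chain_desc {
        unsigned char probe_dev;
};

/* *memo holds 0 for "not yet probed", else matching index + 1,
 * so a zero-initialized field means "unknown, do probe". */
static const struct chain_desc *
lookup_chain(const struct chain_desc *tbl, int (*probe)(unsigned char),
             int *memo)
{
        int i = *memo - 1;

        if (i < 0) {
                for (i = 0; tbl[i].probe_dev != 0xFF; i++)
                        if (!probe(tbl[i].probe_dev))
                                break;
                if (tbl[i].probe_dev == 0xFF)
                        return NULL;    /* nothing answered; stays unknown */
                *memo = i + 1;
        }
        return &tbl[i];
}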
65enum i2c_type { 92enum i2c_type {
66 i2c_line_scl = 0, 93 i2c_line_scl = 0,
67 i2c_line_sda 94 i2c_line_sda
@@ -75,13 +102,6 @@ enum i2c_state {
75#define READ_CMD 1 102#define READ_CMD 1
76#define WRITE_CMD 0 103#define WRITE_CMD 0
77 104
78static int eeprom_init;
79
80/*
81 * The gpioval manipulation really should be protected by spinlocks
82 * or be converted to use atomic operations.
83 */
84
85/** 105/**
86 * i2c_gpio_set - set a GPIO line 106 * i2c_gpio_set - set a GPIO line
87 * @dd: the infinipath device 107 * @dd: the infinipath device
@@ -241,6 +261,27 @@ static int i2c_ackrcv(struct ipath_devdata *dd)
241} 261}
242 262
243/** 263/**
264 * rd_byte - read a byte, leaving ACK, STOP, etc up to caller
265 * @dd: the infinipath device
266 *
267 * Returns byte shifted out of device
268 */
269static int rd_byte(struct ipath_devdata *dd)
270{
271 int bit_cntr, data;
272
273 data = 0;
274
275 for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
276 data <<= 1;
277 scl_out(dd, i2c_line_high);
278 data |= sda_in(dd, 0);
279 scl_out(dd, i2c_line_low);
280 }
281 return data;
282}
283
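The same MSB-first clocked-read idiom as rd_byte() above, freestanding; scl_set() and sda_get() are hypothetical GPIO helpers standing in for scl_out()/sda_in():

static unsigned int bitbang_read_byte(void (*scl_set)(int),
                                      int (*sda_get)(void))
{
        unsigned int data = 0;
        int bit;

        for (bit = 7; bit >= 0; --bit) {
                data <<= 1;
                scl_set(1);             /* clock high: slave drives SDA */
                data |= sda_get() & 1;
                scl_set(0);             /* clock low before the next bit */
        }
        return data;
}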
284/**
244 * wr_byte - write a byte, one bit at a time 285 * wr_byte - write a byte, one bit at a time
245 * @dd: the infinipath device 286 * @dd: the infinipath device
246 * @data: the byte to write 287 * @data: the byte to write
@@ -331,7 +372,6 @@ static int eeprom_reset(struct ipath_devdata *dd)
331 ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg " 372 ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg "
332 "is %llx\n", (unsigned long long) *gpioval); 373 "is %llx\n", (unsigned long long) *gpioval);
333 374
334 eeprom_init = 1;
335 /* 375 /*
336 * This is to get the i2c into a known state, by first going low, 376 * This is to get the i2c into a known state, by first going low,
337 * then tristate sda (and then tristate scl as first thing 377 * then tristate sda (and then tristate scl as first thing
@@ -340,12 +380,17 @@ static int eeprom_reset(struct ipath_devdata *dd)
340 scl_out(dd, i2c_line_low); 380 scl_out(dd, i2c_line_low);
341 sda_out(dd, i2c_line_high); 381 sda_out(dd, i2c_line_high);
342 382
383 /* Clock up to 9 cycles looking for SDA hi, then issue START and STOP */
343 while (clock_cycles_left--) { 384 while (clock_cycles_left--) {
344 scl_out(dd, i2c_line_high); 385 scl_out(dd, i2c_line_high);
345 386
387 /* SDA seen high, issue START by dropping it while SCL high */
346 if (sda_in(dd, 0)) { 388 if (sda_in(dd, 0)) {
347 sda_out(dd, i2c_line_low); 389 sda_out(dd, i2c_line_low);
348 scl_out(dd, i2c_line_low); 390 scl_out(dd, i2c_line_low);
391 /* ATMEL spec says must be followed by STOP. */
392 scl_out(dd, i2c_line_high);
393 sda_out(dd, i2c_line_high);
349 ret = 0; 394 ret = 0;
350 goto bail; 395 goto bail;
351 } 396 }
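The recovery sequence the two comments above describe, reduced to a sketch; scl(), sda() and sda_in() are hypothetical stand-ins for the GPIO helpers:

static int i2c_bus_recover(void (*scl)(int), void (*sda)(int),
                           int (*sda_in)(void))
{
        int i;

        for (i = 0; i < 9; i++) {
                scl(1);
                if (sda_in()) {         /* slave released SDA */
                        sda(0);         /* START: SDA falls while SCL high */
                        scl(0);
                        scl(1);         /* STOP: SDA rises while SCL high */
                        sda(1);
                        return 0;
                }
                scl(0);                 /* clock a stuck bit out of the slave */
        }
        return -1;                      /* SDA still held low */
}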
@@ -359,29 +404,121 @@ bail:
359 return ret; 404 return ret;
360} 405}
361 406
362/** 407/*
363 * ipath_eeprom_read - receives bytes from the eeprom via I2C 408 * Probe for I2C device at specified address. Returns 0 for "success"
364 * @dd: the infinipath device 409 * to match rest of this file.
365 * @eeprom_offset: address to read from 410 * Leave bus in "reasonable" state for further commands.
366 * @buffer: where to store result
367 * @len: number of bytes to receive
368 */ 411 */
412static int i2c_probe(struct ipath_devdata *dd, int devaddr)
413{
414 int ret = 0;
415
416 ret = eeprom_reset(dd);
417 if (ret) {
418 ipath_dev_err(dd, "Failed reset probing device 0x%02X\n",
419 devaddr);
420 return ret;
421 }
422 /*
423 * Reset no longer leaves bus in start condition, so normal
424 * i2c_startcmd() will do.
425 */
426 ret = i2c_startcmd(dd, devaddr | READ_CMD);
427 if (ret)
428 ipath_cdbg(VERBOSE, "Failed startcmd for device 0x%02X\n",
429 devaddr);
430 else {
431 /*
432 * Device did respond. Complete a single-byte read, because some
433 * devices apparently cannot handle STOP immediately after they
434 * ACK the start-cmd.
435 */
436 int data;
437 data = rd_byte(dd);
438 stop_cmd(dd);
439 ipath_cdbg(VERBOSE, "Response from device 0x%02X\n", devaddr);
440 }
441 return ret;
442}
443
444/*
445 * Returns the "i2c type". This is a pointer to a struct that describes
446 * the I2C chain on this board. To minimize impact on struct ipath_devdata,
447 * the (small integer) index into the table is actually memoized, rather
448 * then the pointer.
449 * Memoization is because the type is determined on the first call per chip.
450 * An alternative would be to move type determination to early
451 * init code.
452 */
453static struct i2c_chain_desc *ipath_i2c_type(struct ipath_devdata *dd)
454{
455 int idx;
456
457 /* Get memoized index, from previous successful probes */
458 idx = dd->ipath_i2c_chain_type - 1;
459 if (idx >= 0 && idx < (ARRAY_SIZE(i2c_chains) - 1))
460 goto done;
461
462 idx = 0;
463 while (i2c_chains[idx].probe_dev != IPATH_NO_DEV) {
464 /* if probe succeeds, this is type */
465 if (!i2c_probe(dd, i2c_chains[idx].probe_dev))
466 break;
467 ++idx;
468 }
469
470 /*
471 * Old EEPROM (first entry) may require a reset after probe,
472 * rather than being able to "start" after "stop"
473 */
474 if (idx == 0)
475 eeprom_reset(dd);
476
477 if (i2c_chains[idx].probe_dev == IPATH_NO_DEV)
478 idx = -1;
479 else
480 dd->ipath_i2c_chain_type = idx + 1;
481done:
482 return (idx >= 0) ? i2c_chains + idx : NULL;
483}
369 484
370static int ipath_eeprom_internal_read(struct ipath_devdata *dd, 485static int ipath_eeprom_internal_read(struct ipath_devdata *dd,
371 u8 eeprom_offset, void *buffer, int len) 486 u8 eeprom_offset, void *buffer, int len)
372{ 487{
373 /* compiler complains unless initialized */
374 u8 single_byte = 0;
375 int bit_cntr;
376 int ret; 488 int ret;
489 struct i2c_chain_desc *icd;
490 u8 *bp = buffer;
377 491
378 if (!eeprom_init) 492 ret = 1;
379 eeprom_reset(dd); 493 icd = ipath_i2c_type(dd);
380 494 if (!icd)
381 eeprom_offset = (eeprom_offset << 1) | READ_CMD; 495 goto bail;
382 496
383 if (i2c_startcmd(dd, eeprom_offset)) { 497 if (icd->eeprom_dev == IPATH_NO_DEV) {
384 ipath_dbg("Failed startcmd\n"); 498 /* legacy not-really-I2C */
499 ipath_cdbg(VERBOSE, "Start command only address\n");
500 eeprom_offset = (eeprom_offset << 1) | READ_CMD;
501 ret = i2c_startcmd(dd, eeprom_offset);
502 } else {
503 /* Actual I2C */
504 ipath_cdbg(VERBOSE, "Start command uses devaddr\n");
505 if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
506 ipath_dbg("Failed EEPROM startcmd\n");
507 stop_cmd(dd);
508 ret = 1;
509 goto bail;
510 }
511 ret = wr_byte(dd, eeprom_offset);
512 stop_cmd(dd);
513 if (ret) {
514 ipath_dev_err(dd, "Failed to write EEPROM address\n");
515 ret = 1;
516 goto bail;
517 }
518 ret = i2c_startcmd(dd, icd->eeprom_dev | READ_CMD);
519 }
520 if (ret) {
521 ipath_dbg("Failed startcmd for dev %02X\n", icd->eeprom_dev);
385 stop_cmd(dd); 522 stop_cmd(dd);
386 ret = 1; 523 ret = 1;
387 goto bail; 524 goto bail;
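The two addressing schemes in this read path, side by side as a sketch; start(), wr() and stop() are hypothetical stand-ins for i2c_startcmd(), wr_byte() and stop_cmd(), with 0 meaning success as in the rest of this file:

extern int start(int startbyte);
extern int wr(int byte);
extern void stop(void);

/* legacy pseudo-I2C: the EEPROM offset doubles as the device address */
static int legacy_select(int offset)
{
        return start((offset << 1) | 1 /* READ_CMD */);
}

/* real I2C: address the device, write the offset, re-start for read */
static int i2c_select(int devaddr, int offset)
{
        if (start(devaddr | 0 /* WRITE_CMD */))
                return 1;
        if (wr(offset)) {
                stop();
                return 1;
        }
        stop();
        return start(devaddr | 1 /* READ_CMD */);
}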
@@ -392,22 +529,11 @@ static int ipath_eeprom_internal_read(struct ipath_devdata *dd,
392 * incrementing the address. 529 * incrementing the address.
393 */ 530 */
394 while (len-- > 0) { 531 while (len-- > 0) {
395 /* get data */ 532 /* get and store data */
396 single_byte = 0; 533 *bp++ = rd_byte(dd);
397 for (bit_cntr = 8; bit_cntr; bit_cntr--) {
398 u8 bit;
399 scl_out(dd, i2c_line_high);
400 bit = sda_in(dd, 0);
401 single_byte |= bit << (bit_cntr - 1);
402 scl_out(dd, i2c_line_low);
403 }
404
405 /* send ack if not the last byte */ 534 /* send ack if not the last byte */
406 if (len) 535 if (len)
407 send_ack(dd); 536 send_ack(dd);
408
409 *((u8 *) buffer) = single_byte;
410 buffer++;
411 } 537 }
412 538
413 stop_cmd(dd); 539 stop_cmd(dd);
@@ -418,31 +544,40 @@ bail:
418 return ret; 544 return ret;
419} 545}
420 546
421
422/**
423 * ipath_eeprom_write - writes data to the eeprom via I2C
424 * @dd: the infinipath device
425 * @eeprom_offset: where to place data
426 * @buffer: data to write
427 * @len: number of bytes to write
428 */
429static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset, 547static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
430 const void *buffer, int len) 548 const void *buffer, int len)
431{ 549{
432 u8 single_byte;
433 int sub_len; 550 int sub_len;
434 const u8 *bp = buffer; 551 const u8 *bp = buffer;
435 int max_wait_time, i; 552 int max_wait_time, i;
436 int ret; 553 int ret;
554 struct i2c_chain_desc *icd;
437 555
438 if (!eeprom_init) 556 ret = 1;
439 eeprom_reset(dd); 557 icd = ipath_i2c_type(dd);
558 if (!icd)
559 goto bail;
440 560
441 while (len > 0) { 561 while (len > 0) {
442 if (i2c_startcmd(dd, (eeprom_offset << 1) | WRITE_CMD)) { 562 if (icd->eeprom_dev == IPATH_NO_DEV) {
443 ipath_dbg("Failed to start cmd offset %u\n", 563 if (i2c_startcmd(dd,
444 eeprom_offset); 564 (eeprom_offset << 1) | WRITE_CMD)) {
445 goto failed_write; 565 ipath_dbg("Failed to start cmd offset %u\n",
566 eeprom_offset);
567 goto failed_write;
568 }
569 } else {
570 /* Real I2C */
571 if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
572 ipath_dbg("Failed EEPROM startcmd\n");
573 goto failed_write;
574 }
575 ret = wr_byte(dd, eeprom_offset);
576 if (ret) {
577 ipath_dev_err(dd, "Failed to write EEPROM "
578 "address\n");
579 goto failed_write;
580 }
446 } 581 }
447 582
448 sub_len = min(len, 4); 583 sub_len = min(len, 4);
@@ -468,9 +603,11 @@ static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offse
468 * the writes have completed. We do this inline to avoid 603 * the writes have completed. We do this inline to avoid
469 * the debug prints that are in the real read routine 604 * the debug prints that are in the real read routine
470 * if the startcmd fails. 605 * if the startcmd fails.
606 * We also use the proper device address, so it doesn't matter
607 * whether we have a real eeprom_dev; the legacy device accepts any address.
471 */ 608 */
472 max_wait_time = 100; 609 max_wait_time = 100;
473 while (i2c_startcmd(dd, READ_CMD)) { 610 while (i2c_startcmd(dd, icd->eeprom_dev | READ_CMD)) {
474 stop_cmd(dd); 611 stop_cmd(dd);
475 if (!--max_wait_time) { 612 if (!--max_wait_time) {
476 ipath_dbg("Did not get successful read to " 613 ipath_dbg("Did not get successful read to "
@@ -478,15 +615,8 @@ static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offse
478 goto failed_write; 615 goto failed_write;
479 } 616 }
480 } 617 }
481 /* now read the zero byte */ 618 /* now read (and ignore) the resulting byte */
482 for (i = single_byte = 0; i < 8; i++) { 619 rd_byte(dd);
483 u8 bit;
484 scl_out(dd, i2c_line_high);
485 bit = sda_in(dd, 0);
486 scl_out(dd, i2c_line_low);
487 single_byte <<= 1;
488 single_byte |= bit;
489 }
490 stop_cmd(dd); 620 stop_cmd(dd);
491 } 621 }
492 622
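The wait loop above is the classic EEPROM "ACK poll": keep issuing a start in read mode until the part ACKs, then clock one byte and stop. A freestanding sketch with the same hypothetical stand-ins (start()/rd()/stop()):

extern int start(int startbyte);
extern int rd(void);
extern void stop(void);

static int eeprom_ack_poll(int devaddr, int tries)
{
        while (start(devaddr | 1 /* READ_CMD */)) {     /* NACK: still busy */
                stop();
                if (!--tries)
                        return 1;
        }
        (void)rd();     /* some parts dislike STOP right after the ACK */
        stop();
        return 0;
}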
@@ -501,9 +631,12 @@ bail:
501 return ret; 631 return ret;
502} 632}
503 633
504/* 634/**
505 * The public entry-points ipath_eeprom_read() and ipath_eeprom_write() 635 * ipath_eeprom_read - receives bytes from the eeprom via I2C
506 * are now just wrappers around the internal functions. 636 * @dd: the infinipath device
637 * @eeprom_offset: address to read from
638 * @buffer: where to store result
639 * @len: number of bytes to receive
507 */ 640 */
508int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset, 641int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
509 void *buff, int len) 642 void *buff, int len)
@@ -519,6 +652,13 @@ int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
519 return ret; 652 return ret;
520} 653}
521 654
655/**
656 * ipath_eeprom_write - writes data to the eeprom via I2C
657 * @dd: the infinipath device
658 * @eeprom_offset: where to place data
659 * @buffer: data to write
660 * @len: number of bytes to write
661 */
522int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset, 662int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
523 const void *buff, int len) 663 const void *buff, int len)
524{ 664{
@@ -820,7 +960,7 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd)
820 * if we log an hour at 31 minutes, then we would need to set 960 * if we log an hour at 31 minutes, then we would need to set
821 * active_time to -29 to accurately count the _next_ hour. 961 * active_time to -29 to accurately count the _next_ hour.
822 */ 962 */
823 if (new_time > 3600) { 963 if (new_time >= 3600) {
824 new_hrs = new_time / 3600; 964 new_hrs = new_time / 3600;
825 atomic_sub((new_hrs * 3600), &dd->ipath_active_time); 965 atomic_sub((new_hrs * 3600), &dd->ipath_active_time);
826 new_hrs += dd->ipath_eep_hrs; 966 new_hrs += dd->ipath_eep_hrs;
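A worked instance of the rollover above (values illustrative): with 3720 s accumulated, one whole hour is logged and the remainder keeps counting toward the next hour instead of being discarded:

        unsigned int new_time = 3720;           /* 1 h 2 min active */
        unsigned int new_hrs = new_time / 3600; /* 1: hour to log */
        new_time -= new_hrs * 3600;             /* 120 s carried over */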
@@ -885,3 +1025,159 @@ void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr)
885 spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags); 1025 spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
886 return; 1026 return;
887} 1027}
1028
1029static int ipath_tempsense_internal_read(struct ipath_devdata *dd, u8 regnum)
1030{
1031 int ret;
1032 struct i2c_chain_desc *icd;
1033
1034 ret = -ENOENT;
1035
1036 icd = ipath_i2c_type(dd);
1037 if (!icd)
1038 goto bail;
1039
1040 if (icd->temp_dev == IPATH_NO_DEV) {
1041 /* tempsense only exists on new, real-I2C boards */
1042 ret = -ENXIO;
1043 goto bail;
1044 }
1045
1046 if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
1047 ipath_dbg("Failed tempsense startcmd\n");
1048 stop_cmd(dd);
1049 ret = -ENXIO;
1050 goto bail;
1051 }
1052 ret = wr_byte(dd, regnum);
1053 stop_cmd(dd);
1054 if (ret) {
1055 ipath_dev_err(dd, "Failed tempsense WR command %02X\n",
1056 regnum);
1057 ret = -ENXIO;
1058 goto bail;
1059 }
1060 if (i2c_startcmd(dd, icd->temp_dev | READ_CMD)) {
1061 ipath_dbg("Failed tempsense RD startcmd\n");
1062 stop_cmd(dd);
1063 ret = -ENXIO;
1064 goto bail;
1065 }
1066 /*
1067 * We can sensibly clock out only one byte per command
1068 */
1069 ret = rd_byte(dd);
1070 stop_cmd(dd);
1071
1072bail:
1073 return ret;
1074}
1075
1076#define VALID_TS_RD_REG_MASK 0xBF
1077
1078/**
1079 * ipath_tempsense_read - read register of temp sensor via I2C
1080 * @dd: the infinipath device
1081 * @regnum: register to read from
1082 *
1083 * returns reg contents (0..255) or < 0 for error
1084 */
1085int ipath_tempsense_read(struct ipath_devdata *dd, u8 regnum)
1086{
1087 int ret;
1088
1089 if (regnum > 7)
1090 return -EINVAL;
1091
1092 /* return a bogus value for (the one) register we do not have */
1093 if (!((1 << regnum) & VALID_TS_RD_REG_MASK))
1094 return 0;
1095
1096 ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
1097 if (!ret) {
1098 ret = ipath_tempsense_internal_read(dd, regnum);
1099 mutex_unlock(&dd->ipath_eep_lock);
1100 }
1101
1102 /*
1103 * There are three possibilities here:
1104 * ret is actual value (0..255)
1105 * ret is -ENXIO or -EINVAL from code in this file
1106 * ret is -EINTR from mutex_lock_interruptible.
1107 */
1108 return ret;
1109}
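The mask arithmetic spelled out: VALID_TS_RD_REG_MASK is 0xBF = 0b10111111, so of registers 0..7 only regnum 6 fails the test and is the one that reads back as the bogus 0. As a sketch (helper name hypothetical):

static int ts_reg_readable(unsigned int regnum)
{
        return regnum <= 7 && ((1u << regnum) & 0xBF /* RD mask */);
}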
1110
1111static int ipath_tempsense_internal_write(struct ipath_devdata *dd,
1112 u8 regnum, u8 data)
1113{
1114 int ret = -ENOENT;
1115 struct i2c_chain_desc *icd;
1116
1117 icd = ipath_i2c_type(dd);
1118 if (!icd)
1119 goto bail;
1120
1121 if (icd->temp_dev == IPATH_NO_DEV) {
1122 /* tempsense only exists on new, real-I2C boards */
1123 ret = -ENXIO;
1124 goto bail;
1125 }
1126 if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
1127 ipath_dbg("Failed tempsense startcmd\n");
1128 stop_cmd(dd);
1129 ret = -ENXIO;
1130 goto bail;
1131 }
1132 ret = wr_byte(dd, regnum);
1133 if (ret) {
1134 stop_cmd(dd);
1135 ipath_dev_err(dd, "Failed to write tempsense command %02X\n",
1136 regnum);
1137 ret = -ENXIO;
1138 goto bail;
1139 }
1140 ret = wr_byte(dd, data);
1141 stop_cmd(dd);
1142 ret = i2c_startcmd(dd, icd->temp_dev | READ_CMD);
1143 if (ret) {
1144 ipath_dev_err(dd, "Failed tempsense data write to %02X\n",
1145 regnum);
1146 ret = -ENXIO;
1147 }
1148
1149bail:
1150 return ret;
1151}
1152
1153#define VALID_TS_WR_REG_MASK ((1 << 9) | (1 << 0xB) | (1 << 0xD))
1154
1155/**
1156 * ipath_tempsense_write - write register of temp sensor via I2C
1157 * @dd: the infinipath device
1158 * @regnum: register to write
1159 * @data: data to write
1160 *
1161 * returns 0 for success or < 0 for error
1162 */
1163int ipath_tempsense_write(struct ipath_devdata *dd, u8 regnum, u8 data)
1164{
1165 int ret;
1166
1167 if (regnum > 15 || !((1 << regnum) & VALID_TS_WR_REG_MASK))
1168 return -EINVAL;
1169
1170 ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
1171 if (!ret) {
1172 ret = ipath_tempsense_internal_write(dd, regnum, data);
1173 mutex_unlock(&dd->ipath_eep_lock);
1174 }
1175
1176 /*
1177 * There are three possibilities here:
1178 * ret is 0 for success
1179 * ret is -ENXIO or -EINVAL from code in this file
1180 * ret is -EINTR from mutex_lock_interruptible.
1181 */
1182 return ret;
1183}
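Likewise for writes: VALID_TS_WR_REG_MASK expands to (1 << 9) | (1 << 0xB) | (1 << 0xD) = 0x2A00, so only registers 9, 0xB and 0xD pass the regnum check. As a sketch (helper name hypothetical):

static int ts_reg_writable(unsigned int regnum)
{
        return regnum <= 15 && ((1u << regnum) & 0x2A00 /* WR mask */);
}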
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 7e025c8e01b6..1e627aab18bf 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -36,21 +36,28 @@
36#include <linux/cdev.h> 36#include <linux/cdev.h>
37#include <linux/swap.h> 37#include <linux/swap.h>
38#include <linux/vmalloc.h> 38#include <linux/vmalloc.h>
39#include <linux/highmem.h>
40#include <linux/io.h>
41#include <linux/jiffies.h>
39#include <asm/pgtable.h> 42#include <asm/pgtable.h>
40 43
41#include "ipath_kernel.h" 44#include "ipath_kernel.h"
42#include "ipath_common.h" 45#include "ipath_common.h"
46#include "ipath_user_sdma.h"
43 47
44static int ipath_open(struct inode *, struct file *); 48static int ipath_open(struct inode *, struct file *);
45static int ipath_close(struct inode *, struct file *); 49static int ipath_close(struct inode *, struct file *);
46static ssize_t ipath_write(struct file *, const char __user *, size_t, 50static ssize_t ipath_write(struct file *, const char __user *, size_t,
47 loff_t *); 51 loff_t *);
52static ssize_t ipath_writev(struct kiocb *, const struct iovec *,
53 unsigned long , loff_t);
48static unsigned int ipath_poll(struct file *, struct poll_table_struct *); 54static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
49static int ipath_mmap(struct file *, struct vm_area_struct *); 55static int ipath_mmap(struct file *, struct vm_area_struct *);
50 56
51static const struct file_operations ipath_file_ops = { 57static const struct file_operations ipath_file_ops = {
52 .owner = THIS_MODULE, 58 .owner = THIS_MODULE,
53 .write = ipath_write, 59 .write = ipath_write,
60 .aio_write = ipath_writev,
54 .open = ipath_open, 61 .open = ipath_open,
55 .release = ipath_close, 62 .release = ipath_close,
56 .poll = ipath_poll, 63 .poll = ipath_poll,
@@ -184,6 +191,29 @@ static int ipath_get_base_info(struct file *fp,
184 kinfo->spi_piobufbase = (u64) pd->port_piobufs + 191 kinfo->spi_piobufbase = (u64) pd->port_piobufs +
185 dd->ipath_palign * kinfo->spi_piocnt * slave; 192 dd->ipath_palign * kinfo->spi_piocnt * slave;
186 } 193 }
194
195 /*
196 * Set the PIO avail update threshold to no larger
197 * than the number of buffers per process. Note that
198 * we decrease it here, but won't ever increase it.
199 */
200 if (dd->ipath_pioupd_thresh &&
201 kinfo->spi_piocnt < dd->ipath_pioupd_thresh) {
202 unsigned long flags;
203
204 dd->ipath_pioupd_thresh = kinfo->spi_piocnt;
205 ipath_dbg("Decreased pio update threshold to %u\n",
206 dd->ipath_pioupd_thresh);
207 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
208 dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
209 << INFINIPATH_S_UPDTHRESH_SHIFT);
210 dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
211 << INFINIPATH_S_UPDTHRESH_SHIFT;
212 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
213 dd->ipath_sendctrl);
214 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
215 }
216
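The sendctrl update above is the usual clear-then-insert of a shifted bit field, done while holding the lock. The field manipulation alone, as a generic sketch (names illustrative):

static unsigned long long set_field(unsigned long long reg,
                                    unsigned long long mask,
                                    unsigned int shift,
                                    unsigned long long val)
{
        reg &= ~(mask << shift);                /* clear the old field */
        return reg | ((val & mask) << shift);   /* insert the new value */
}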
187 if (shared) { 217 if (shared) {
188 kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase + 218 kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
189 dd->ipath_ureg_align * pd->port_port; 219 dd->ipath_ureg_align * pd->port_port;
@@ -219,7 +249,12 @@ static int ipath_get_base_info(struct file *fp,
219 kinfo->spi_pioalign = dd->ipath_palign; 249 kinfo->spi_pioalign = dd->ipath_palign;
220 250
221 kinfo->spi_qpair = IPATH_KD_QP; 251 kinfo->spi_qpair = IPATH_KD_QP;
222 kinfo->spi_piosize = dd->ipath_ibmaxlen; 252 /*
253 * user mode PIO buffers are always 2KB, even when 4KB packets can
254 * be received, and can be sent via the kernel; this is ibmaxlen
255 * for a 2K MTU.
256 */
257 kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32);
223 kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */ 258 kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */
224 kinfo->spi_port = pd->port_port; 259 kinfo->spi_port = pd->port_port;
225 kinfo->spi_subport = subport_fp(fp); 260 kinfo->spi_subport = subport_fp(fp);
@@ -1598,6 +1633,9 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
1598 port_fp(fp) = pd; 1633 port_fp(fp) = pd;
1599 pd->port_pid = current->pid; 1634 pd->port_pid = current->pid;
1600 strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); 1635 strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
1636 ipath_chg_pioavailkernel(dd,
1637 dd->ipath_pbufsport * (pd->port_port - 1),
1638 dd->ipath_pbufsport, 0);
1601 ipath_stats.sps_ports++; 1639 ipath_stats.sps_ports++;
1602 ret = 0; 1640 ret = 0;
1603 } else 1641 } else
@@ -1760,7 +1798,7 @@ static int find_shared_port(struct file *fp,
1760 for (ndev = 0; ndev < devmax; ndev++) { 1798 for (ndev = 0; ndev < devmax; ndev++) {
1761 struct ipath_devdata *dd = ipath_lookup(ndev); 1799 struct ipath_devdata *dd = ipath_lookup(ndev);
1762 1800
1763 if (!dd) 1801 if (!usable(dd))
1764 continue; 1802 continue;
1765 for (i = 1; i < dd->ipath_cfgports; i++) { 1803 for (i = 1; i < dd->ipath_cfgports; i++) {
1766 struct ipath_portdata *pd = dd->ipath_pd[i]; 1804 struct ipath_portdata *pd = dd->ipath_pd[i];
@@ -1839,10 +1877,9 @@ static int ipath_assign_port(struct file *fp,
1839 if (ipath_compatible_subports(swmajor, swminor) && 1877 if (ipath_compatible_subports(swmajor, swminor) &&
1840 uinfo->spu_subport_cnt && 1878 uinfo->spu_subport_cnt &&
1841 (ret = find_shared_port(fp, uinfo))) { 1879 (ret = find_shared_port(fp, uinfo))) {
1842 mutex_unlock(&ipath_mutex);
1843 if (ret > 0) 1880 if (ret > 0)
1844 ret = 0; 1881 ret = 0;
1845 goto done; 1882 goto done_chk_sdma;
1846 } 1883 }
1847 1884
1848 i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE; 1885 i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE;
@@ -1854,6 +1891,21 @@ static int ipath_assign_port(struct file *fp,
1854 else 1891 else
1855 ret = find_best_unit(fp, uinfo); 1892 ret = find_best_unit(fp, uinfo);
1856 1893
1894done_chk_sdma:
1895 if (!ret) {
1896 struct ipath_filedata *fd = fp->private_data;
1897 const struct ipath_portdata *pd = fd->pd;
1898 const struct ipath_devdata *dd = pd->port_dd;
1899
1900 fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev,
1901 dd->ipath_unit,
1902 pd->port_port,
1903 fd->subport);
1904
1905 if (!fd->pq)
1906 ret = -ENOMEM;
1907 }
1908
1857 mutex_unlock(&ipath_mutex); 1909 mutex_unlock(&ipath_mutex);
1858 1910
1859done: 1911done:
@@ -1922,22 +1974,25 @@ static int ipath_do_user_init(struct file *fp,
1922 pd->port_hdrqfull_poll = pd->port_hdrqfull; 1974 pd->port_hdrqfull_poll = pd->port_hdrqfull;
1923 1975
1924 /* 1976 /*
1925 * now enable the port; the tail registers will be written to memory 1977 * Now enable the port for receive.
1926 * by the chip as soon as it sees the write to 1978 * For chips that are set to DMA the tail register to memory
1927 * dd->ipath_kregs->kr_rcvctrl. The update only happens on 1979 * when they change (i.e. when the update bit transitions from
1928 * transition from 0 to 1, so clear it first, then set it as part of 1980 * 0 to 1), we turn it off and then back on.
1929 * enabling the port. This will (very briefly) affect any other 1981 * This will (very briefly) affect any other open ports, but the
1930 * open ports, but it shouldn't be long enough to be an issue. 1982 * duration is very short, and therefore isn't an issue. We
1931 * We explicitly set the in-memory copy to 0 beforehand, so we don't 1983 * explicitly set the in-memory tail copy to 0 beforehand, so we
1932 * have to wait to be sure the DMA update has happened. 1984 * don't have to wait to be sure the DMA update has happened
1985 * (chip resets head/tail to 0 on transition to enable).
1933 */ 1986 */
1934 if (pd->port_rcvhdrtail_kvaddr)
1935 ipath_clear_rcvhdrtail(pd);
1936 set_bit(dd->ipath_r_portenable_shift + pd->port_port, 1987 set_bit(dd->ipath_r_portenable_shift + pd->port_port,
1937 &dd->ipath_rcvctrl); 1988 &dd->ipath_rcvctrl);
1938 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1989 if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
1990 if (pd->port_rcvhdrtail_kvaddr)
1991 ipath_clear_rcvhdrtail(pd);
1992 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1939 dd->ipath_rcvctrl & 1993 dd->ipath_rcvctrl &
1940 ~(1ULL << dd->ipath_r_tailupd_shift)); 1994 ~(1ULL << dd->ipath_r_tailupd_shift));
1995 }
1941 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1996 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1942 dd->ipath_rcvctrl); 1997 dd->ipath_rcvctrl);
1943 /* Notify any waiting slaves */ 1998 /* Notify any waiting slaves */
@@ -1965,14 +2020,15 @@ static void unlock_expected_tids(struct ipath_portdata *pd)
1965 ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n", 2020 ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
1966 pd->port_port); 2021 pd->port_port);
1967 for (i = port_tidbase; i < maxtid; i++) { 2022 for (i = port_tidbase; i < maxtid; i++) {
1968 if (!dd->ipath_pageshadow[i]) 2023 struct page *ps = dd->ipath_pageshadow[i];
2024
2025 if (!ps)
1969 continue; 2026 continue;
1970 2027
2028 dd->ipath_pageshadow[i] = NULL;
1971 pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i], 2029 pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
1972 PAGE_SIZE, PCI_DMA_FROMDEVICE); 2030 PAGE_SIZE, PCI_DMA_FROMDEVICE);
1973 ipath_release_user_pages_on_close(&dd->ipath_pageshadow[i], 2031 ipath_release_user_pages_on_close(&ps, 1);
1974 1);
1975 dd->ipath_pageshadow[i] = NULL;
1976 cnt++; 2032 cnt++;
1977 ipath_stats.sps_pageunlocks++; 2033 ipath_stats.sps_pageunlocks++;
1978 } 2034 }
@@ -2007,6 +2063,13 @@ static int ipath_close(struct inode *in, struct file *fp)
2007 mutex_unlock(&ipath_mutex); 2063 mutex_unlock(&ipath_mutex);
2008 goto bail; 2064 goto bail;
2009 } 2065 }
2066
2067 dd = pd->port_dd;
2068
2069 /* drain user sdma queue */
2070 ipath_user_sdma_queue_drain(dd, fd->pq);
2071 ipath_user_sdma_queue_destroy(fd->pq);
2072
2010 if (--pd->port_cnt) { 2073 if (--pd->port_cnt) {
2011 /* 2074 /*
2012 * XXX If the master closes the port before the slave(s), 2075 * XXX If the master closes the port before the slave(s),
@@ -2019,7 +2082,6 @@ static int ipath_close(struct inode *in, struct file *fp)
2019 goto bail; 2082 goto bail;
2020 } 2083 }
2021 port = pd->port_port; 2084 port = pd->port_port;
2022 dd = pd->port_dd;
2023 2085
2024 if (pd->port_hdrqfull) { 2086 if (pd->port_hdrqfull) {
2025 ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors " 2087 ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
@@ -2039,7 +2101,7 @@ static int ipath_close(struct inode *in, struct file *fp)
2039 pd->port_rcvnowait = pd->port_pionowait = 0; 2101 pd->port_rcvnowait = pd->port_pionowait = 0;
2040 } 2102 }
2041 if (pd->port_flag) { 2103 if (pd->port_flag) {
2042 ipath_dbg("port %u port_flag still set to 0x%lx\n", 2104 ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n",
2043 pd->port_port, pd->port_flag); 2105 pd->port_port, pd->port_flag);
2044 pd->port_flag = 0; 2106 pd->port_flag = 0;
2045 } 2107 }
@@ -2076,6 +2138,7 @@ static int ipath_close(struct inode *in, struct file *fp)
2076 2138
2077 i = dd->ipath_pbufsport * (port - 1); 2139 i = dd->ipath_pbufsport * (port - 1);
2078 ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport); 2140 ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
2141 ipath_chg_pioavailkernel(dd, i, dd->ipath_pbufsport, 1);
2079 2142
2080 dd->ipath_f_clear_tids(dd, pd->port_port); 2143 dd->ipath_f_clear_tids(dd, pd->port_port);
2081 2144
@@ -2140,17 +2203,31 @@ static int ipath_get_slave_info(struct ipath_portdata *pd,
2140 return ret; 2203 return ret;
2141} 2204}
2142 2205
2143static int ipath_force_pio_avail_update(struct ipath_devdata *dd) 2206static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq,
2207 u32 __user *inflightp)
2144{ 2208{
2145 unsigned long flags; 2209 const u32 val = ipath_user_sdma_inflight_counter(pq);
2146 2210
2147 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); 2211 if (put_user(val, inflightp))
2148 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 2212 return -EFAULT;
2149 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD); 2213
2150 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 2214 return 0;
2151 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); 2215}
2152 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 2216
2153 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); 2217static int ipath_sdma_get_complete(struct ipath_devdata *dd,
2218 struct ipath_user_sdma_queue *pq,
2219 u32 __user *completep)
2220{
2221 u32 val;
2222 int err;
2223
2224 err = ipath_user_sdma_make_progress(dd, pq);
2225 if (err < 0)
2226 return err;
2227
2228 val = ipath_user_sdma_complete_counter(pq);
2229 if (put_user(val, completep))
2230 return -EFAULT;
2154 2231
2155 return 0; 2232 return 0;
2156} 2233}
@@ -2229,6 +2306,16 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
2229 dest = &cmd.cmd.armlaunch_ctrl; 2306 dest = &cmd.cmd.armlaunch_ctrl;
2230 src = &ucmd->cmd.armlaunch_ctrl; 2307 src = &ucmd->cmd.armlaunch_ctrl;
2231 break; 2308 break;
2309 case IPATH_CMD_SDMA_INFLIGHT:
2310 copy = sizeof(cmd.cmd.sdma_inflight);
2311 dest = &cmd.cmd.sdma_inflight;
2312 src = &ucmd->cmd.sdma_inflight;
2313 break;
2314 case IPATH_CMD_SDMA_COMPLETE:
2315 copy = sizeof(cmd.cmd.sdma_complete);
2316 dest = &cmd.cmd.sdma_complete;
2317 src = &ucmd->cmd.sdma_complete;
2318 break;
2232 default: 2319 default:
2233 ret = -EINVAL; 2320 ret = -EINVAL;
2234 goto bail; 2321 goto bail;
@@ -2299,7 +2386,7 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
2299 cmd.cmd.slave_mask_addr); 2386 cmd.cmd.slave_mask_addr);
2300 break; 2387 break;
2301 case IPATH_CMD_PIOAVAILUPD: 2388 case IPATH_CMD_PIOAVAILUPD:
2302 ret = ipath_force_pio_avail_update(pd->port_dd); 2389 ipath_force_pio_avail_update(pd->port_dd);
2303 break; 2390 break;
2304 case IPATH_CMD_POLL_TYPE: 2391 case IPATH_CMD_POLL_TYPE:
2305 pd->poll_type = cmd.cmd.poll_type; 2392 pd->poll_type = cmd.cmd.poll_type;
@@ -2310,6 +2397,17 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
2310 else 2397 else
2311 ipath_disable_armlaunch(pd->port_dd); 2398 ipath_disable_armlaunch(pd->port_dd);
2312 break; 2399 break;
2400 case IPATH_CMD_SDMA_INFLIGHT:
2401 ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp),
2402 (u32 __user *) (unsigned long)
2403 cmd.cmd.sdma_inflight);
2404 break;
2405 case IPATH_CMD_SDMA_COMPLETE:
2406 ret = ipath_sdma_get_complete(pd->port_dd,
2407 user_sdma_queue_fp(fp),
2408 (u32 __user *) (unsigned long)
2409 cmd.cmd.sdma_complete);
2410 break;
2313 } 2411 }
2314 2412
2315 if (ret >= 0) 2413 if (ret >= 0)
@@ -2319,6 +2417,20 @@ bail:
2319 return ret; 2417 return ret;
2320} 2418}
2321 2419
2420static ssize_t ipath_writev(struct kiocb *iocb, const struct iovec *iov,
2421 unsigned long dim, loff_t off)
2422{
2423 struct file *filp = iocb->ki_filp;
2424 struct ipath_filedata *fp = filp->private_data;
2425 struct ipath_portdata *pd = port_fp(filp);
2426 struct ipath_user_sdma_queue *pq = fp->pq;
2427
2428 if (!dim)
2429 return -EINVAL;
2430
2431 return ipath_user_sdma_writev(pd->port_dd, pq, iov, dim);
2432}
2433
2322static struct class *ipath_class; 2434static struct class *ipath_class;
2323 2435
2324static int init_cdev(int minor, char *name, const struct file_operations *fops, 2436static int init_cdev(int minor, char *name, const struct file_operations *fops,
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 9e2ced3cdc5e..02831ad070b8 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -40,6 +40,7 @@
40#include <linux/pci.h> 40#include <linux/pci.h>
41#include <linux/delay.h> 41#include <linux/delay.h>
42#include <linux/htirq.h> 42#include <linux/htirq.h>
43#include <rdma/ib_verbs.h>
43 44
44#include "ipath_kernel.h" 45#include "ipath_kernel.h"
45#include "ipath_registers.h" 46#include "ipath_registers.h"
@@ -305,7 +306,9 @@ static const struct ipath_cregs ipath_ht_cregs = {
305 306
306/* kr_intstatus, kr_intclear, kr_intmask bits */ 307/* kr_intstatus, kr_intclear, kr_intmask bits */
307#define INFINIPATH_I_RCVURG_MASK ((1U<<9)-1) 308#define INFINIPATH_I_RCVURG_MASK ((1U<<9)-1)
309#define INFINIPATH_I_RCVURG_SHIFT 0
308#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<9)-1) 310#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<9)-1)
311#define INFINIPATH_I_RCVAVAIL_SHIFT 12
309 312
310/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ 313/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
311#define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0 314#define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0
@@ -476,7 +479,13 @@ static const struct ipath_hwerror_msgs ipath_6110_hwerror_msgs[] = {
476#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \ 479#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
477 << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) 480 << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
478 481
479static int ipath_ht_txe_recover(struct ipath_devdata *); 482static void ipath_ht_txe_recover(struct ipath_devdata *dd)
483{
484 ++ipath_stats.sps_txeparity;
485 dev_info(&dd->pcidev->dev,
486 "Recovering from TXE PIO parity error\n");
487}
488
480 489
481/** 490/**
482 * ipath_ht_handle_hwerrors - display hardware errors. 491 * ipath_ht_handle_hwerrors - display hardware errors.
@@ -557,11 +566,11 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
557 * occur if a processor speculative read is done to the PIO 566 * occur if a processor speculative read is done to the PIO
558 * buffer while we are sending a packet, for example. 567 * buffer while we are sending a packet, for example.
559 */ 568 */
560 if ((hwerrs & TXE_PIO_PARITY) && ipath_ht_txe_recover(dd)) 569 if (hwerrs & TXE_PIO_PARITY) {
570 ipath_ht_txe_recover(dd);
561 hwerrs &= ~TXE_PIO_PARITY; 571 hwerrs &= ~TXE_PIO_PARITY;
562 if (hwerrs & RXE_EAGER_PARITY) 572 }
563 ipath_dev_err(dd, "RXE parity, Eager TID error is not " 573
564 "recoverable\n");
565 if (!hwerrs) { 574 if (!hwerrs) {
566 ipath_dbg("Clearing freezemode on ignored or " 575 ipath_dbg("Clearing freezemode on ignored or "
567 "recovered hardware error\n"); 576 "recovered hardware error\n");
@@ -735,11 +744,10 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
735 */ 744 */
736 dd->ipath_flags |= IPATH_32BITCOUNTERS; 745 dd->ipath_flags |= IPATH_32BITCOUNTERS;
737 dd->ipath_flags |= IPATH_GPIO_INTR; 746 dd->ipath_flags |= IPATH_GPIO_INTR;
738 if (dd->ipath_htspeed != 800) 747 if (dd->ipath_lbus_speed != 800)
739 ipath_dev_err(dd, 748 ipath_dev_err(dd,
740 "Incorrectly configured for HT @ %uMHz\n", 749 "Incorrectly configured for HT @ %uMHz\n",
741 dd->ipath_htspeed); 750 dd->ipath_lbus_speed);
742 ret = 0;
743 751
744 /* 752 /*
745 * set here, not in ipath_init_*_funcs because we have to do 753 * set here, not in ipath_init_*_funcs because we have to do
@@ -839,7 +847,7 @@ static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
839 /* 847 /*
840 * now write them back to clear the error. 848 * now write them back to clear the error.
841 */ 849 */
842 pci_write_config_byte(pdev, link_off, 850 pci_write_config_word(pdev, link_off,
843 linkctrl & (0xf << 8)); 851 linkctrl & (0xf << 8));
844 } 852 }
845 } 853 }
@@ -904,7 +912,7 @@ static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
904 break; 912 break;
905 } 913 }
906 914
907 dd->ipath_htwidth = width; 915 dd->ipath_lbus_width = width;
908 916
909 if (linkwidth != 0x11) { 917 if (linkwidth != 0x11) {
910 ipath_dev_err(dd, "Not configured for 16 bit HT " 918 ipath_dev_err(dd, "Not configured for 16 bit HT "
@@ -952,8 +960,13 @@ static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
952 speed = 200; 960 speed = 200;
953 break; 961 break;
954 } 962 }
955 dd->ipath_htspeed = speed; 963 dd->ipath_lbus_speed = speed;
956 } 964 }
965
966 snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
967 "HyperTransport,%uMHz,x%u\n",
968 dd->ipath_lbus_speed,
969 dd->ipath_lbus_width);
957} 970}
958 971
959static int ipath_ht_intconfig(struct ipath_devdata *dd) 972static int ipath_ht_intconfig(struct ipath_devdata *dd)
@@ -1653,22 +1666,6 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
1653} 1666}
1654 1667
1655 1668
1656static int ipath_ht_txe_recover(struct ipath_devdata *dd)
1657{
1658 int cnt = ++ipath_stats.sps_txeparity;
1659 if (cnt >= IPATH_MAX_PARITY_ATTEMPTS) {
1660 if (cnt == IPATH_MAX_PARITY_ATTEMPTS)
1661 ipath_dev_err(dd,
1662 "Too many attempts to recover from "
1663 "TXE parity, giving up\n");
1664 return 0;
1665 }
1666 dev_info(&dd->pcidev->dev,
1667 "Recovering from TXE PIO parity error\n");
1668 return 1;
1669}
1670
1671
1672/** 1669/**
1673 * ipath_init_ht_get_base_info - set chip-specific flags for user code 1670 * ipath_init_ht_get_base_info - set chip-specific flags for user code
1674 * @dd: the infinipath device 1671 * @dd: the infinipath device
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index c7a2f50824c0..421cc2af891f 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -38,7 +38,7 @@
38#include <linux/interrupt.h> 38#include <linux/interrupt.h>
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/delay.h> 40#include <linux/delay.h>
41 41#include <rdma/ib_verbs.h>
42 42
43#include "ipath_kernel.h" 43#include "ipath_kernel.h"
44#include "ipath_registers.h" 44#include "ipath_registers.h"
@@ -311,9 +311,14 @@ static const struct ipath_cregs ipath_pe_cregs = {
311 .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt) 311 .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
312}; 312};
313 313
314/* kr_control bits */
315#define INFINIPATH_C_RESET 1U
316
314/* kr_intstatus, kr_intclear, kr_intmask bits */ 317/* kr_intstatus, kr_intclear, kr_intmask bits */
315#define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1) 318#define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1)
319#define INFINIPATH_I_RCVURG_SHIFT 0
316#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1) 320#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1)
321#define INFINIPATH_I_RCVAVAIL_SHIFT 12
317 322
318/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ 323/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
319#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL 324#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
@@ -338,6 +343,9 @@ static const struct ipath_cregs ipath_pe_cregs = {
338#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000 343#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
339#define INFINIPATH_EXTS_MEMBIST_FOUND 0x0000000000008000 344#define INFINIPATH_EXTS_MEMBIST_FOUND 0x0000000000008000
340 345
346/* kr_xgxsconfig bits */
347#define INFINIPATH_XGXS_RESET 0x5ULL
348
341#define _IPATH_GPIO_SDA_NUM 1 349#define _IPATH_GPIO_SDA_NUM 1
342#define _IPATH_GPIO_SCL_NUM 0 350#define _IPATH_GPIO_SCL_NUM 0
343 351
@@ -346,6 +354,16 @@ static const struct ipath_cregs ipath_pe_cregs = {
346#define IPATH_GPIO_SCL (1ULL << \ 354#define IPATH_GPIO_SCL (1ULL << \
347 (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) 355 (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
348 356
357#define INFINIPATH_RT_BUFSIZE_MASK 0xe0000000ULL
358#define INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid) \
359 ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
360#define INFINIPATH_RT_BUFSIZE(tid) (1 << INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid))
361#define INFINIPATH_RT_IS_VALID(tid) \
362 (((tid) & INFINIPATH_RT_BUFSIZE_MASK) && \
363 ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) != INFINIPATH_RT_BUFSIZE_MASK)))
364#define INFINIPATH_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
365#define INFINIPATH_RT_ADDR_SHIFT 10
366
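A worked decode of the TID layout above (value illustrative): with size code 2 in bits 31..29, INFINIPATH_RT_BUFSIZE_SHIFTVAL() yields 2 + 11 - 1 = 12, i.e. a 4 KB buffer; codes 0 and 7 fail INFINIPATH_RT_IS_VALID():

        unsigned long long tid = 2ULL << 29;    /* size code 2, addr 0 */
        /* INFINIPATH_RT_BUFSIZE(tid) == 1 << 12 == 4096 */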
349#define INFINIPATH_R_INTRAVAIL_SHIFT 16 367#define INFINIPATH_R_INTRAVAIL_SHIFT 16
350#define INFINIPATH_R_TAILUPD_SHIFT 31 368#define INFINIPATH_R_TAILUPD_SHIFT 31
351 369
@@ -372,6 +390,8 @@ static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
372#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \ 390#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
373 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \ 391 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
374 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) 392 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
393#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
394 << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
375 395
376static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *, 396static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *,
377 u32, unsigned long); 397 u32, unsigned long);
@@ -450,10 +470,8 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
450 * make sure we get this much out, unless told to be quiet, 470 * make sure we get this much out, unless told to be quiet,
451 * or it's occurred within the last 5 seconds 471 * or it's occurred within the last 5 seconds
452 */ 472 */
453 if ((hwerrs & ~(dd->ipath_lasthwerror | 473 if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY |
454 ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | 474 RXE_EAGER_PARITY)) ||
455 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
456 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) ||
457 (ipath_debug & __IPATH_VERBDBG)) 475 (ipath_debug & __IPATH_VERBDBG))
458 dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx " 476 dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
459 "(cleared)\n", (unsigned long long) hwerrs); 477 "(cleared)\n", (unsigned long long) hwerrs);
@@ -465,7 +483,7 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
465 (hwerrs & ~dd->ipath_hwe_bitsextant)); 483 (hwerrs & ~dd->ipath_hwe_bitsextant));
466 484
467 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); 485 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
468 if (ctrl & INFINIPATH_C_FREEZEMODE) { 486 if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
469 /* 487 /*
470 * parity errors in send memory are recoverable, 488 * parity errors in send memory are recoverable,
471 * just cancel the send (if indicated in sendbuffererror), 489 * just cancel the send (if indicated in sendbuffererror),
@@ -540,12 +558,40 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
540 dd->ipath_hwerrmask); 558 dd->ipath_hwerrmask);
541 } 559 }
542 560
543 if (*msg) 561 if (hwerrs) {
562 /*
563 * if any set that we aren't ignoring; only
564 * make the complaint once, in case it's stuck
565 * or recurring, and we get here multiple
566 * times.
567 */
544 ipath_dev_err(dd, "%s hardware error\n", msg); 568 ipath_dev_err(dd, "%s hardware error\n", msg);
545 if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) { 569 if (dd->ipath_flags & IPATH_INITTED) {
570 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
571 ipath_setup_pe_setextled(dd,
572 INFINIPATH_IBCS_L_STATE_DOWN,
573 INFINIPATH_IBCS_LT_STATE_DISABLED);
574 ipath_dev_err(dd, "Fatal Hardware Error (freeze "
575 "mode), no longer usable, SN %.16s\n",
576 dd->ipath_serial);
577 isfatal = 1;
578 }
579 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
580 /* mark as having had error */
581 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
546 /* 582 /*
547 * for /sys status file ; if no trailing } is copied, we'll 583 * mark as not usable, at a minimum until driver
548 * know it was truncated. 584 * is reloaded, probably until reboot, since no
585 * other reset is possible.
586 */
587 dd->ipath_flags &= ~IPATH_INITTED;
588 } else
589 *msg = 0; /* recovered from all of them */
590
591 if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg && msg) {
592 /*
593 * for /sys status file ; if no trailing brace is copied,
594 * we'll know it was truncated.
549 */ 595 */
550 snprintf(dd->ipath_freezemsg, dd->ipath_freezelen, 596 snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
551 "{%s}", msg); 597 "{%s}", msg);
@@ -610,7 +656,6 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
610 dd->ipath_f_put_tid = ipath_pe_put_tid_2; 656 dd->ipath_f_put_tid = ipath_pe_put_tid_2;
611 } 657 }
612 658
613
614 /* 659 /*
615 * set here, not in ipath_init_*_funcs because we have to do 660 * set here, not in ipath_init_*_funcs because we have to do
616 * it after we can read chip registers. 661 * it after we can read chip registers.
@@ -838,7 +883,7 @@ static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
838 extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON | 883 extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
839 INFINIPATH_EXTC_LED2PRIPORT_ON); 884 INFINIPATH_EXTC_LED2PRIPORT_ON);
840 885
841 if (ltst & INFINIPATH_IBCS_LT_STATE_LINKUP) 886 if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
842 extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON; 887 extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
843 if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE) 888 if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
844 extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON; 889 extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
@@ -863,6 +908,62 @@ static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
863 pci_disable_msi(dd->pcidev); 908 pci_disable_msi(dd->pcidev);
864} 909}
865 910
911static void ipath_6120_pcie_params(struct ipath_devdata *dd)
912{
913 u16 linkstat, speed;
914 int pos;
915
916 pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
917 if (!pos) {
918 ipath_dev_err(dd, "Can't find PCI Express capability!\n");
919 goto bail;
920 }
921
922 pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
923 &linkstat);
924 /*
925 * link speed is in bits 0-3, link width in bits 4-8;
926 * no defines for them in headers
927 */
928 speed = linkstat & 0xf;
929 linkstat >>= 4;
930 linkstat &= 0x1f;
931 dd->ipath_lbus_width = linkstat;
932
933 switch (speed) {
934 case 1:
935 dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
936 break;
937 case 2:
938 dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */
939 break;
940 default: /* not defined, assume gen1 */
941 dd->ipath_lbus_speed = 2500;
942 break;
943 }
944
945 if (linkstat < 8)
946 ipath_dev_err(dd,
947 "PCIe width %u (x8 HCA), performance reduced\n",
948 linkstat);
949 else
950 ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x8 HCA)\n",
951 dd->ipath_lbus_speed, linkstat);
952
953 if (speed != 1)
954 ipath_dev_err(dd,
955 "PCIe linkspeed %u is incorrect; "
956 "should be 1 (2500)!\n", speed);
957bail:
958 /* fill in string, even on errors */
959 snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
960 "PCIe,%uMHz,x%u\n",
961 dd->ipath_lbus_speed,
962 dd->ipath_lbus_width);
963
964 return;
965}
966
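Later kernels gained generic masks for these link-status fields; a sketch of the same decode using them (PCI_EXP_LNKSTA_CLS and PCI_EXP_LNKSTA_NLW are assumed from <linux/pci_regs.h>, which did not yet define them when this code was written):

#include <linux/pci_regs.h>

static void decode_lnksta(unsigned short linkstat,
                          unsigned int *speed, unsigned int *width)
{
        *speed = linkstat & PCI_EXP_LNKSTA_CLS;         /* bits 3:0 */
        *width = (linkstat & PCI_EXP_LNKSTA_NLW) >> 4;  /* bits 9:4 */
}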
866/** 967/**
867 * ipath_setup_pe_config - setup PCIe config related stuff 968 * ipath_setup_pe_config - setup PCIe config related stuff
868 * @dd: the infinipath device 969 * @dd: the infinipath device
@@ -920,19 +1021,8 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd,
920 } else 1021 } else
921 ipath_dev_err(dd, "Can't find MSI capability, " 1022 ipath_dev_err(dd, "Can't find MSI capability, "
922 "can't save MSI settings for reset\n"); 1023 "can't save MSI settings for reset\n");
923 if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP))) { 1024
924 u16 linkstat; 1025 ipath_6120_pcie_params(dd);
925 pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
926 &linkstat);
927 linkstat >>= 4;
928 linkstat &= 0x1f;
929 if (linkstat != 8)
930 ipath_dev_err(dd, "PCIe width %u, "
931 "performance reduced\n", linkstat);
932 }
933 else
934 ipath_dev_err(dd, "Can't find PCI Express "
935 "capability!\n");
936 1026
937 dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; 1027 dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
938 dd->ipath_link_speed_supported = IPATH_IB_SDR; 1028 dd->ipath_link_speed_supported = IPATH_IB_SDR;
@@ -1065,10 +1155,7 @@ static void ipath_init_pe_variables(struct ipath_devdata *dd)
1065 INFINIPATH_HWE_RXEMEMPARITYERR_MASK << 1155 INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
1066 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT; 1156 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
1067 1157
1068 dd->ipath_eep_st_masks[2].errs_to_log = 1158 dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
1069 INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET;
1070
1071
1072 dd->delay_mult = 2; /* SDR, 4X, can't change */ 1159 dd->delay_mult = 2; /* SDR, 4X, can't change */
1073} 1160}
1074 1161
@@ -1142,6 +1229,9 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
1142 u64 val; 1229 u64 val;
1143 int i; 1230 int i;
1144 int ret; 1231 int ret;
1232 u16 cmdval;
1233
1234 pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);
1145 1235
1146 /* Use ERROR so it shows up in logs, etc. */ 1236 /* Use ERROR so it shows up in logs, etc. */
1147 ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit); 1237 ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
@@ -1169,10 +1259,14 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
1169 ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n", 1259 ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n",
1170 r); 1260 r);
1171 /* now re-enable memory access */ 1261 /* now re-enable memory access */
1262 pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
1172 if ((r = pci_enable_device(dd->pcidev))) 1263 if ((r = pci_enable_device(dd->pcidev)))
1173 ipath_dev_err(dd, "pci_enable_device failed after " 1264 ipath_dev_err(dd, "pci_enable_device failed after "
1174 "reset: %d\n", r); 1265 "reset: %d\n", r);
1175 /* whether it worked or not, mark as present, again */ 1266 /*
1267 * whether it fully enabled or not, mark as present,
1268 * again (but not INITTED)
1269 */
1176 dd->ipath_flags |= IPATH_PRESENT; 1270 dd->ipath_flags |= IPATH_PRESENT;
1177 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision); 1271 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
1178 if (val == dd->ipath_revision) { 1272 if (val == dd->ipath_revision) {
@@ -1190,6 +1284,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
1190 ret = 0; /* failed */ 1284 ret = 0; /* failed */
1191 1285
1192bail: 1286bail:
1287 if (ret)
1288 ipath_6120_pcie_params(dd);
1193 return ret; 1289 return ret;
1194} 1290}
1195 1291
@@ -1209,16 +1305,21 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
1209{ 1305{
1210 u32 __iomem *tidp32 = (u32 __iomem *)tidptr; 1306 u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
1211 unsigned long flags = 0; /* keep gcc quiet */ 1307 unsigned long flags = 0; /* keep gcc quiet */
1308 int tidx;
1309 spinlock_t *tidlockp;
1310
1311 if (!dd->ipath_kregbase)
1312 return;
1212 1313
1213 if (pa != dd->ipath_tidinvalid) { 1314 if (pa != dd->ipath_tidinvalid) {
1214 if (pa & ((1U << 11) - 1)) { 1315 if (pa & ((1U << 11) - 1)) {
1215 dev_info(&dd->pcidev->dev, "BUG: physaddr %lx " 1316 dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
1216 "not 4KB aligned!\n", pa); 1317 "not 2KB aligned!\n", pa);
1217 return; 1318 return;
1218 } 1319 }
1219 pa >>= 11; 1320 pa >>= 11;
1220 /* paranoia check */ 1321 /* paranoia check */
1221 if (pa & (7<<29)) 1322 if (pa & ~INFINIPATH_RT_ADDR_MASK)
1222 ipath_dev_err(dd, 1323 ipath_dev_err(dd,
1223 "BUG: Physical page address 0x%lx " 1324 "BUG: Physical page address 0x%lx "
1224 "has bits set in 31-29\n", pa); 1325 "has bits set in 31-29\n", pa);
@@ -1238,14 +1339,22 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
1238 * call can be done from interrupt level for the port 0 eager TIDs, 1339 * call can be done from interrupt level for the port 0 eager TIDs,
1239 * so we have to use irqsave locks. 1340 * so we have to use irqsave locks.
1240 */ 1341 */
1241 spin_lock_irqsave(&dd->ipath_tid_lock, flags); 1342 /*
1343 * Assumes tidptr always > ipath_egrtidbase
1344 * if type == RCVHQ_RCV_TYPE_EAGER.
1345 */
1346 tidx = tidptr - dd->ipath_egrtidbase;
1347
1348 tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->ipath_rcvegrcnt)
1349 ? &dd->ipath_kernel_tid_lock : &dd->ipath_user_tid_lock;
1350 spin_lock_irqsave(tidlockp, flags);
1242 ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf); 1351 ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf);
1243 if (dd->ipath_kregbase) 1352 writel(pa, tidp32);
1244 writel(pa, tidp32);
1245 ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef); 1353 ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef);
1246 mmiowb(); 1354 mmiowb();
1247 spin_unlock_irqrestore(&dd->ipath_tid_lock, flags); 1355 spin_unlock_irqrestore(tidlockp, flags);
1248} 1356}
1357
1249/** 1358/**
1250 * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher 1359 * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
1251 * @dd: the infinipath device 1360 * @dd: the infinipath device
@@ -1261,6 +1370,10 @@ static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
1261 u32 type, unsigned long pa) 1370 u32 type, unsigned long pa)
1262{ 1371{
1263 u32 __iomem *tidp32 = (u32 __iomem *)tidptr; 1372 u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
1373 u32 tidx;
1374
1375 if (!dd->ipath_kregbase)
1376 return;
1264 1377
1265 if (pa != dd->ipath_tidinvalid) { 1378 if (pa != dd->ipath_tidinvalid) {
1266 if (pa & ((1U << 11) - 1)) { 1379 if (pa & ((1U << 11) - 1)) {
@@ -1270,7 +1383,7 @@ static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
1270 } 1383 }
1271 pa >>= 11; 1384 pa >>= 11;
1272 /* paranoia check */ 1385 /* paranoia check */
1273 if (pa & (7<<29)) 1386 if (pa & ~INFINIPATH_RT_ADDR_MASK)
1274 ipath_dev_err(dd, 1387 ipath_dev_err(dd,
1275 "BUG: Physical page address 0x%lx " 1388 "BUG: Physical page address 0x%lx "
1276 "has bits set in 31-29\n", pa); 1389 "has bits set in 31-29\n", pa);
@@ -1280,8 +1393,8 @@ static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
1280 else /* for now, always full 4KB page */ 1393 else /* for now, always full 4KB page */
1281 pa |= 2 << 29; 1394 pa |= 2 << 29;
1282 } 1395 }
1283 if (dd->ipath_kregbase) 1396 tidx = tidptr - dd->ipath_egrtidbase;
1284 writel(pa, tidp32); 1397 writel(pa, tidp32);
1285 mmiowb(); 1398 mmiowb();
1286} 1399}
1287 1400
@@ -1379,17 +1492,13 @@ static int ipath_pe_early_init(struct ipath_devdata *dd)
1379 dd->ipath_egrtidbase = (u64 __iomem *) 1492 dd->ipath_egrtidbase = (u64 __iomem *)
1380 ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase); 1493 ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase);
1381 1494
1382 /* 1495 dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
1383 * To truly support a 4KB MTU (for usermode), we need to
1384 * bump this to a larger value. For now, we use them for
1385 * the kernel only.
1386 */
1387 dd->ipath_rcvegrbufsize = 2048;
1388 /* 1496 /*
1389 * the min() check here is currently a nop, but it may not always 1497 * the min() check here is currently a nop, but it may not always
1390 * be, depending on just how we do ipath_rcvegrbufsize 1498 * be, depending on just how we do ipath_rcvegrbufsize
1391 */ 1499 */
1392 dd->ipath_ibmaxlen = min(dd->ipath_piosize2k, 1500 dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
1501 dd->ipath_piosize2k,
1393 dd->ipath_rcvegrbufsize + 1502 dd->ipath_rcvegrbufsize +
1394 (dd->ipath_rcvhdrentsize << 2)); 1503 (dd->ipath_rcvhdrentsize << 2));
1395 dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen; 1504 dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
new file mode 100644
index 000000000000..1b2de2cfb69b
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c
@@ -0,0 +1,2571 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33/*
34 * This file contains all of the code that is specific to the
35 * InfiniPath 7220 chip (except that specific to the SerDes)
36 */
37
38#include <linux/interrupt.h>
39#include <linux/pci.h>
40#include <linux/delay.h>
41#include <linux/io.h>
42#include <rdma/ib_verbs.h>
43
44#include "ipath_kernel.h"
45#include "ipath_registers.h"
46#include "ipath_7220.h"
47
48static void ipath_setup_7220_setextled(struct ipath_devdata *, u64, u64);
49
50static unsigned ipath_compat_ddr_negotiate = 1;
51
52module_param_named(compat_ddr_negotiate, ipath_compat_ddr_negotiate, uint,
53 S_IWUSR | S_IRUGO);
54MODULE_PARM_DESC(compat_ddr_negotiate,
55 "Attempt pre-IBTA 1.2 DDR speed negotiation");
56
57static unsigned ipath_sdma_fetch_arb = 1;
58module_param_named(fetch_arb, ipath_sdma_fetch_arb, uint, S_IRUGO);
59MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
60
61/*
62 * This file contains almost all the chip-specific register information and
63 * access functions for the QLogic InfiniPath 7220 PCI-Express chip, with the
64 * exception of SerDes support, which is in ipath_sd7220.c.
65 *
66 * This lists the InfiniPath registers, in the actual chip layout.
67 * This structure should never be directly accessed.
68 */
69struct _infinipath_do_not_use_kernel_regs {
70 unsigned long long Revision;
71 unsigned long long Control;
72 unsigned long long PageAlign;
73 unsigned long long PortCnt;
74 unsigned long long DebugPortSelect;
75 unsigned long long DebugSigsIntSel; /* was Reserved0;*/
76 unsigned long long SendRegBase;
77 unsigned long long UserRegBase;
78 unsigned long long CounterRegBase;
79 unsigned long long Scratch;
80 unsigned long long EEPROMAddrCmd; /* was Reserved1; */
81 unsigned long long EEPROMData; /* was Reserved2; */
82 unsigned long long IntBlocked;
83 unsigned long long IntMask;
84 unsigned long long IntStatus;
85 unsigned long long IntClear;
86 unsigned long long ErrorMask;
87 unsigned long long ErrorStatus;
88 unsigned long long ErrorClear;
89 unsigned long long HwErrMask;
90 unsigned long long HwErrStatus;
91 unsigned long long HwErrClear;
92 unsigned long long HwDiagCtrl;
93 unsigned long long MDIO;
94 unsigned long long IBCStatus;
95 unsigned long long IBCCtrl;
96 unsigned long long ExtStatus;
97 unsigned long long ExtCtrl;
98 unsigned long long GPIOOut;
99 unsigned long long GPIOMask;
100 unsigned long long GPIOStatus;
101 unsigned long long GPIOClear;
102 unsigned long long RcvCtrl;
103 unsigned long long RcvBTHQP;
104 unsigned long long RcvHdrSize;
105 unsigned long long RcvHdrCnt;
106 unsigned long long RcvHdrEntSize;
107 unsigned long long RcvTIDBase;
108 unsigned long long RcvTIDCnt;
109 unsigned long long RcvEgrBase;
110 unsigned long long RcvEgrCnt;
111 unsigned long long RcvBufBase;
112 unsigned long long RcvBufSize;
113 unsigned long long RxIntMemBase;
114 unsigned long long RxIntMemSize;
115 unsigned long long RcvPartitionKey;
116 unsigned long long RcvQPMulticastPort;
117 unsigned long long RcvPktLEDCnt;
118 unsigned long long IBCDDRCtrl;
119 unsigned long long HRTBT_GUID;
120 unsigned long long IB_SDTEST_IF_TX;
121 unsigned long long IB_SDTEST_IF_RX;
122 unsigned long long IBCDDRCtrl2;
123 unsigned long long IBCDDRStatus;
124 unsigned long long JIntReload;
125 unsigned long long IBNCModeCtrl;
126 unsigned long long SendCtrl;
127 unsigned long long SendBufBase;
128 unsigned long long SendBufSize;
129 unsigned long long SendBufCnt;
130 unsigned long long SendAvailAddr;
131 unsigned long long TxIntMemBase;
132 unsigned long long TxIntMemSize;
133 unsigned long long SendDmaBase;
134 unsigned long long SendDmaLenGen;
135 unsigned long long SendDmaTail;
136 unsigned long long SendDmaHead;
137 unsigned long long SendDmaHeadAddr;
138 unsigned long long SendDmaBufMask0;
139 unsigned long long SendDmaBufMask1;
140 unsigned long long SendDmaBufMask2;
141 unsigned long long SendDmaStatus;
142 unsigned long long SendBufferError;
143 unsigned long long SendBufferErrorCONT1;
144 unsigned long long SendBufErr2; /* was Reserved6SBE[0/6] */
145 unsigned long long Reserved6L[2];
146 unsigned long long AvailUpdCount;
147 unsigned long long RcvHdrAddr0;
148 unsigned long long RcvHdrAddrs[16]; /* Why enumerate? */
149 unsigned long long Reserved7hdtl; /* Align next to 300 */
150 unsigned long long RcvHdrTailAddr0; /* 300, like others */
151 unsigned long long RcvHdrTailAddrs[16];
152 unsigned long long Reserved9SW[7]; /* was [8]; we have 17 ports */
153 unsigned long long IbsdEpbAccCtl; /* IB Serdes EPB access control */
154 unsigned long long IbsdEpbTransReg; /* IB Serdes EPB Transaction */
155 unsigned long long Reserved10sds; /* was SerdesStatus on */
156 unsigned long long XGXSConfig;
157 unsigned long long IBSerDesCtrl; /* Was IBPLLCfg on Monty */
158 unsigned long long EEPCtlStat; /* for "boot" EEPROM/FLASH */
159 unsigned long long EEPAddrCmd;
160 unsigned long long EEPData;
161 unsigned long long PcieEpbAccCtl;
162 unsigned long long PcieEpbTransCtl;
163 unsigned long long EfuseCtl; /* E-Fuse control */
164 unsigned long long EfuseData[4];
165 unsigned long long ProcMon;
166 /* this chip moves the following two from their previous 200, 208 */
167 unsigned long long PCIeRBufTestReg0;
168 unsigned long long PCIeRBufTestReg1;
169 /* added for this chip */
170 unsigned long long PCIeRBufTestReg2;
171 unsigned long long PCIeRBufTestReg3;
172 /* added for this chip, debug only */
173 unsigned long long SPC_JTAG_ACCESS_REG;
174 unsigned long long LAControlReg;
175 unsigned long long GPIODebugSelReg;
176 unsigned long long DebugPortValueReg;
177 /* added for this chip, DMA */
178 unsigned long long SendDmaBufUsed[3];
179 unsigned long long SendDmaReqTagUsed;
180 /*
181 * added for this chip, EFUSE: note that these program 64-bit
182 * words 2 and 3 */
183 unsigned long long efuse_pgm_data[2];
184 unsigned long long Reserved11LAalign[10]; /* Skip 4B0..4F8 */
185 /* we have 30 regs for DDS and RXEQ in IB SERDES */
186 unsigned long long SerDesDDSRXEQ[30];
187 unsigned long long Reserved12LAalign[2]; /* Skip 5F0, 5F8 */
188 /* added for LA debug support */
189 unsigned long long LAMemory[32];
190};
191
192struct _infinipath_do_not_use_counters {
193 __u64 LBIntCnt;
194 __u64 LBFlowStallCnt;
195 __u64 TxSDmaDescCnt; /* was Reserved1 */
196 __u64 TxUnsupVLErrCnt;
197 __u64 TxDataPktCnt;
198 __u64 TxFlowPktCnt;
199 __u64 TxDwordCnt;
200 __u64 TxLenErrCnt;
201 __u64 TxMaxMinLenErrCnt;
202 __u64 TxUnderrunCnt;
203 __u64 TxFlowStallCnt;
204 __u64 TxDroppedPktCnt;
205 __u64 RxDroppedPktCnt;
206 __u64 RxDataPktCnt;
207 __u64 RxFlowPktCnt;
208 __u64 RxDwordCnt;
209 __u64 RxLenErrCnt;
210 __u64 RxMaxMinLenErrCnt;
211 __u64 RxICRCErrCnt;
212 __u64 RxVCRCErrCnt;
213 __u64 RxFlowCtrlErrCnt;
214 __u64 RxBadFormatCnt;
215 __u64 RxLinkProblemCnt;
216 __u64 RxEBPCnt;
217 __u64 RxLPCRCErrCnt;
218 __u64 RxBufOvflCnt;
219 __u64 RxTIDFullErrCnt;
220 __u64 RxTIDValidErrCnt;
221 __u64 RxPKeyMismatchCnt;
222 __u64 RxP0HdrEgrOvflCnt;
223 __u64 RxP1HdrEgrOvflCnt;
224 __u64 RxP2HdrEgrOvflCnt;
225 __u64 RxP3HdrEgrOvflCnt;
226 __u64 RxP4HdrEgrOvflCnt;
227 __u64 RxP5HdrEgrOvflCnt;
228 __u64 RxP6HdrEgrOvflCnt;
229 __u64 RxP7HdrEgrOvflCnt;
230 __u64 RxP8HdrEgrOvflCnt;
231 __u64 RxP9HdrEgrOvflCnt; /* was Reserved6 */
232 __u64 RxP10HdrEgrOvflCnt; /* was Reserved7 */
233 __u64 RxP11HdrEgrOvflCnt; /* new for IBA7220 */
234 __u64 RxP12HdrEgrOvflCnt; /* new for IBA7220 */
235 __u64 RxP13HdrEgrOvflCnt; /* new for IBA7220 */
236 __u64 RxP14HdrEgrOvflCnt; /* new for IBA7220 */
237 __u64 RxP15HdrEgrOvflCnt; /* new for IBA7220 */
238 __u64 RxP16HdrEgrOvflCnt; /* new for IBA7220 */
239 __u64 IBStatusChangeCnt;
240 __u64 IBLinkErrRecoveryCnt;
241 __u64 IBLinkDownedCnt;
242 __u64 IBSymbolErrCnt;
243 /* The following are new for IBA7220 */
244 __u64 RxVL15DroppedPktCnt;
245 __u64 RxOtherLocalPhyErrCnt;
246 __u64 PcieRetryBufDiagQwordCnt;
247 __u64 ExcessBufferOvflCnt;
248 __u64 LocalLinkIntegrityErrCnt;
249 __u64 RxVlErrCnt;
250 __u64 RxDlidFltrCnt;
251 __u64 Reserved8[7];
252 __u64 PSStat;
253 __u64 PSStart;
254 __u64 PSInterval;
255 __u64 PSRcvDataCount;
256 __u64 PSRcvPktsCount;
257 __u64 PSXmitDataCount;
258 __u64 PSXmitPktsCount;
259 __u64 PSXmitWaitCount;
260};
261
262#define IPATH_KREG_OFFSET(field) (offsetof( \
263 struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
264#define IPATH_CREG_OFFSET(field) (offsetof( \
265 struct _infinipath_do_not_use_counters, field) / sizeof(u64))
266
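These two macros turn a field of the layout structs above into a 64-bit-word register index: offsetof() gives the field's byte offset, and dividing by sizeof(u64) converts bytes to words. A self-contained sketch with a hypothetical three-register layout:

    #include <stddef.h>     /* offsetof */

    struct example_regs {
            unsigned long long Revision;    /* word 0 */
            unsigned long long Control;     /* word 1 */
            unsigned long long Scratch;     /* word 2 */
    };

    #define EXAMPLE_IDX(field) \
            (offsetof(struct example_regs, field) / sizeof(unsigned long long))

    /* EXAMPLE_IDX(Scratch) == 2: byte offset 16 divided by 8 */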
267static const struct ipath_kregs ipath_7220_kregs = {
268 .kr_control = IPATH_KREG_OFFSET(Control),
269 .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
270 .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
271 .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
272 .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
273 .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
274 .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
275 .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
276 .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
277 .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
278 .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
279 .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
280 .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
281 .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
282 .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
283 .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
284 .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
285 .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
286 .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
287 .kr_intclear = IPATH_KREG_OFFSET(IntClear),
288 .kr_intmask = IPATH_KREG_OFFSET(IntMask),
289 .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
290 .kr_mdio = IPATH_KREG_OFFSET(MDIO),
291 .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
292 .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
293 .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
294 .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
295 .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
296 .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
297 .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
298 .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
299 .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
300 .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
301 .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
302 .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
303 .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
304 .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
305 .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
306 .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
307 .kr_revision = IPATH_KREG_OFFSET(Revision),
308 .kr_scratch = IPATH_KREG_OFFSET(Scratch),
309 .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
310 .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
311 .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendAvailAddr),
312 .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendBufBase),
313 .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendBufCnt),
314 .kr_sendpiosize = IPATH_KREG_OFFSET(SendBufSize),
315 .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
316 .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
317 .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
318 .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
319
320 .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
321
322 /* send dma related regs */
323 .kr_senddmabase = IPATH_KREG_OFFSET(SendDmaBase),
324 .kr_senddmalengen = IPATH_KREG_OFFSET(SendDmaLenGen),
325 .kr_senddmatail = IPATH_KREG_OFFSET(SendDmaTail),
326 .kr_senddmahead = IPATH_KREG_OFFSET(SendDmaHead),
327 .kr_senddmaheadaddr = IPATH_KREG_OFFSET(SendDmaHeadAddr),
328 .kr_senddmabufmask0 = IPATH_KREG_OFFSET(SendDmaBufMask0),
329 .kr_senddmabufmask1 = IPATH_KREG_OFFSET(SendDmaBufMask1),
330 .kr_senddmabufmask2 = IPATH_KREG_OFFSET(SendDmaBufMask2),
331 .kr_senddmastatus = IPATH_KREG_OFFSET(SendDmaStatus),
332
333 /* SerDes related regs */
334 .kr_ibserdesctrl = IPATH_KREG_OFFSET(IBSerDesCtrl),
335 .kr_ib_epbacc = IPATH_KREG_OFFSET(IbsdEpbAccCtl),
336 .kr_ib_epbtrans = IPATH_KREG_OFFSET(IbsdEpbTransReg),
337 .kr_pcie_epbacc = IPATH_KREG_OFFSET(PcieEpbAccCtl),
338 .kr_pcie_epbtrans = IPATH_KREG_OFFSET(PcieEpbTransCtl),
339 .kr_ib_ddsrxeq = IPATH_KREG_OFFSET(SerDesDDSRXEQ),
340
341 /*
342 * These should not be used directly via ipath_read_kreg64(),
343 * use them with ipath_read_kreg64_port()
344 */
345 .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
346 .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
347
348 /*
349 * The rcvpktled register controls one of the debug port signals, so
350 * a packet activity LED can be connected to it.
351 */
352 .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
353 .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
354 .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
355
356 .kr_hrtbt_guid = IPATH_KREG_OFFSET(HRTBT_GUID),
357 .kr_ibcddrctrl = IPATH_KREG_OFFSET(IBCDDRCtrl),
358 .kr_ibcddrstatus = IPATH_KREG_OFFSET(IBCDDRStatus),
359 .kr_jintreload = IPATH_KREG_OFFSET(JIntReload)
360};
361
362static const struct ipath_cregs ipath_7220_cregs = {
363 .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
364 .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
365 .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
366 .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
367 .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
368 .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
369 .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
370 .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
371 .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
372 .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
373 .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
374 .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
375 .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
376 .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
377 .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
378 .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
379 .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
380 .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
381 .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
382 .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
383 .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
384 .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
385 .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
386 .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
387 .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
388 .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
389 .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
390 .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
391 .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
392 .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
393 .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
394 .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
395 .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt),
396 .cr_vl15droppedpktcnt = IPATH_CREG_OFFSET(RxVL15DroppedPktCnt),
397 .cr_rxotherlocalphyerrcnt =
398 IPATH_CREG_OFFSET(RxOtherLocalPhyErrCnt),
399 .cr_excessbufferovflcnt = IPATH_CREG_OFFSET(ExcessBufferOvflCnt),
400 .cr_locallinkintegrityerrcnt =
401 IPATH_CREG_OFFSET(LocalLinkIntegrityErrCnt),
402 .cr_rxvlerrcnt = IPATH_CREG_OFFSET(RxVlErrCnt),
403 .cr_rxdlidfltrcnt = IPATH_CREG_OFFSET(RxDlidFltrCnt),
404 .cr_psstat = IPATH_CREG_OFFSET(PSStat),
405 .cr_psstart = IPATH_CREG_OFFSET(PSStart),
406 .cr_psinterval = IPATH_CREG_OFFSET(PSInterval),
407 .cr_psrcvdatacount = IPATH_CREG_OFFSET(PSRcvDataCount),
408 .cr_psrcvpktscount = IPATH_CREG_OFFSET(PSRcvPktsCount),
409 .cr_psxmitdatacount = IPATH_CREG_OFFSET(PSXmitDataCount),
410 .cr_psxmitpktscount = IPATH_CREG_OFFSET(PSXmitPktsCount),
411 .cr_psxmitwaitcount = IPATH_CREG_OFFSET(PSXmitWaitCount),
412};
413
414/* kr_control bits */
415#define INFINIPATH_C_RESET (1U<<7)
416
417/* kr_intstatus, kr_intclear, kr_intmask bits */
418#define INFINIPATH_I_RCVURG_MASK ((1ULL<<17)-1)
419#define INFINIPATH_I_RCVURG_SHIFT 32
420#define INFINIPATH_I_RCVAVAIL_MASK ((1ULL<<17)-1)
421#define INFINIPATH_I_RCVAVAIL_SHIFT 0
422#define INFINIPATH_I_SERDESTRIMDONE (1ULL<<27)
423
424/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
425#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL
426#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
427#define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
428#define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
429#define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
430#define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
431#define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
432#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
433#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
434#define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
435#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
436#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL
437/* specific to this chip */
438#define INFINIPATH_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL
439#define INFINIPATH_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL
440#define INFINIPATH_HWE_SDMAMEMREADERR 0x0000000010000000ULL
441#define INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL
442#define INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL
443#define INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL
444#define INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL
445#define INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL
446#define INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL
447#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
448#define INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
449#define INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
450
451#define IBA7220_IBCS_LINKTRAININGSTATE_MASK 0x1F
452#define IBA7220_IBCS_LINKSTATE_SHIFT 5
453#define IBA7220_IBCS_LINKSPEED_SHIFT 8
454#define IBA7220_IBCS_LINKWIDTH_SHIFT 9
455
456#define IBA7220_IBCC_LINKINITCMD_MASK 0x7ULL
457#define IBA7220_IBCC_LINKCMD_SHIFT 19
458#define IBA7220_IBCC_MAXPKTLEN_SHIFT 21
459
460/* kr_ibcddrctrl bits */
461#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
462#define IBA7220_IBC_DLIDLMC_SHIFT 32
463#define IBA7220_IBC_HRTBT_MASK 3
464#define IBA7220_IBC_HRTBT_SHIFT 16
465#define IBA7220_IBC_HRTBT_ENB 0x10000UL
466#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
467#define IBA7220_IBC_LREV_MASK 1
468#define IBA7220_IBC_LREV_SHIFT 8
469#define IBA7220_IBC_RXPOL_MASK 1
470#define IBA7220_IBC_RXPOL_SHIFT 7
471#define IBA7220_IBC_WIDTH_SHIFT 5
472#define IBA7220_IBC_WIDTH_MASK 0x3
473#define IBA7220_IBC_WIDTH_1X_ONLY (0<<IBA7220_IBC_WIDTH_SHIFT)
474#define IBA7220_IBC_WIDTH_4X_ONLY (1<<IBA7220_IBC_WIDTH_SHIFT)
475#define IBA7220_IBC_WIDTH_AUTONEG (2<<IBA7220_IBC_WIDTH_SHIFT)
476#define IBA7220_IBC_SPEED_AUTONEG (1<<1)
477#define IBA7220_IBC_SPEED_SDR (1<<2)
478#define IBA7220_IBC_SPEED_DDR (1<<3)
479#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7<<1)
480#define IBA7220_IBC_IBTA_1_2_MASK (1)
481
482/* kr_ibcddrstatus */
483/* link latency shift is 0, don't bother defining */
484#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff
485
486/* kr_extstatus bits */
487#define INFINIPATH_EXTS_FREQSEL 0x2
488#define INFINIPATH_EXTS_SERDESSEL 0x4
489#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
490#define INFINIPATH_EXTS_MEMBIST_DISABLED 0x0000000000008000
491
492/* kr_xgxsconfig bits */
493#define INFINIPATH_XGXS_RESET 0x5ULL
494#define INFINIPATH_XGXS_FC_SAFE (1ULL<<63)
495
496/* kr_rcvpktledcnt */
497#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
498#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
499
500#define _IPATH_GPIO_SDA_NUM 1
501#define _IPATH_GPIO_SCL_NUM 0
502
503#define IPATH_GPIO_SDA (1ULL << \
504 (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
505#define IPATH_GPIO_SCL (1ULL << \
506 (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
507
508#define IBA7220_R_INTRAVAIL_SHIFT 17
509#define IBA7220_R_TAILUPD_SHIFT 35
510#define IBA7220_R_PORTCFG_SHIFT 36
511
512#define INFINIPATH_JINT_PACKETSHIFT 16
513#define INFINIPATH_JINT_DEFAULT_IDLE_TICKS 0
514#define INFINIPATH_JINT_DEFAULT_MAX_PACKETS 0
515
516#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
517
518/*
519 * The size bits give us 2^N, in KB units. 0 marks the entry as
520 * invalid, and 7 is reserved. We currently use only 2KB and 4KB.
521 */
522#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
523#define IBA7220_TID_SZ_2K (1UL<<IBA7220_TID_SZ_SHIFT) /* 2KB */
524#define IBA7220_TID_SZ_4K (2UL<<IBA7220_TID_SZ_SHIFT) /* 4KB */
525#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
526
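To see how these defines compose, here is a sketch only, assuming the same packing convention as the 6120 put_tid code earlier in this patch (physical address stored without its low 11 bits, size selector OR'd in above it); example_tid_entry() is hypothetical:

    /* pack a 4KB-aligned physical address and the 4KB size selector
     * into one TID word */
    static inline u64 example_tid_entry(unsigned long pa)
    {
            return (pa >> IBA7220_TID_PA_SHIFT) | IBA7220_TID_SZ_4K;
    }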
527#define IPATH_AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
528
529static char int_type[16] = "auto";
530module_param_string(interrupt_type, int_type, sizeof(int_type), 0444);
531MODULE_PARM_DESC(int_type, " interrupt_type=auto|force_msi|force_intx\n");
532
533/* packet rate matching delay; chip has support */
534static u8 rate_to_delay[2][2] = {
535 /* 1x, 4x */
536 { 8, 2 }, /* SDR */
537 { 4, 1 } /* DDR */
538};
539
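The table is indexed speed-major: the first index selects SDR (0) or DDR (1), the second selects 1x (0) or 4x (1). A lookup sketch (is_ddr and is_4x are hypothetical flags):

    /* SDR/4x -> 2, DDR/1x -> 4, etc.; the driver's default further down,
     * rate_to_delay[0][1], is the SDR/4x value of 2 */
    u8 delay = rate_to_delay[is_ddr ? 1 : 0][is_4x ? 1 : 0];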
540/* 7220 specific hardware errors... */
541static const struct ipath_hwerror_msgs ipath_7220_hwerror_msgs[] = {
542 INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
543 INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"),
544 /*
545 * In practice, it's unlikely that we'll see PCIe PLL, or bus
546 * parity or memory parity error failures, because most likely we
547 * won't be able to talk to the core of the chip. Nonetheless, we
548 * might see them, if they are in parts of the PCIe core that aren't
549 * essential.
550 */
551 INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"),
552 INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"),
553 INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"),
554 INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"),
555 INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"),
556 INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
557 INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
558 INFINIPATH_HWE_MSG(PCIECPLDATAQUEUEERR, "PCIe cpl data queue"),
559 INFINIPATH_HWE_MSG(PCIECPLHDRQUEUEERR, "PCIe cpl header queue"),
560 INFINIPATH_HWE_MSG(SDMAMEMREADERR, "Send DMA memory read"),
561 INFINIPATH_HWE_MSG(CLK_UC_PLLNOTLOCKED, "uC PLL clock not locked"),
562 INFINIPATH_HWE_MSG(PCIESERDESQ0PCLKNOTDETECT,
563 "PCIe serdes Q0 no clock"),
564 INFINIPATH_HWE_MSG(PCIESERDESQ1PCLKNOTDETECT,
565 "PCIe serdes Q1 no clock"),
566 INFINIPATH_HWE_MSG(PCIESERDESQ2PCLKNOTDETECT,
567 "PCIe serdes Q2 no clock"),
568 INFINIPATH_HWE_MSG(PCIESERDESQ3PCLKNOTDETECT,
569 "PCIe serdes Q3 no clock"),
570 INFINIPATH_HWE_MSG(DDSRXEQMEMORYPARITYERR,
571 "DDS RXEQ memory parity"),
572 INFINIPATH_HWE_MSG(IB_UC_MEMORYPARITYERR, "IB uC memory parity"),
573 INFINIPATH_HWE_MSG(PCIE_UC_OCT0MEMORYPARITYERR,
574 "PCIe uC oct0 memory parity"),
575 INFINIPATH_HWE_MSG(PCIE_UC_OCT1MEMORYPARITYERR,
576 "PCIe uC oct1 memory parity"),
577};
578
579static void autoneg_work(struct work_struct *);
580
581/*
582 * the offset is different for different configured port numbers, since
583 * port0 is fixed in size, but others can vary. Make it a function to
584 * make the issue more obvious.
585 */
586static inline u32 port_egrtid_idx(struct ipath_devdata *dd, unsigned port)
587{
588 return port ? dd->ipath_p0_rcvegrcnt +
589 (port-1) * dd->ipath_rcvegrcnt : 0;
590}
591
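So the eager-TID index space puts port 0's fixed-size block first, followed by one uniform block per additional port. A usage sketch under that layout (start and cnt are illustrative locals):

    /* port 0 starts at index 0; port 1 at ipath_p0_rcvegrcnt; port 2 at
     * ipath_p0_rcvegrcnt + ipath_rcvegrcnt; and so on */
    u32 start = port_egrtid_idx(dd, port);
    u32 cnt = port ? dd->ipath_rcvegrcnt : dd->ipath_p0_rcvegrcnt;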
592static void ipath_7220_txe_recover(struct ipath_devdata *dd)
593{
594 ++ipath_stats.sps_txeparity;
595
596 dev_info(&dd->pcidev->dev,
597 "Recovering from TXE PIO parity error\n");
598 ipath_disarm_senderrbufs(dd, 1);
599}
600
601
602/**
603 * ipath_7220_handle_hwerrors - display hardware errors.
604 * @dd: the infinipath device
605 * @msg: the output buffer
606 * @msgl: the size of the output buffer
607 *
608 * Most hardware errors are catastrophic, but for right now,
609 * we'll print them and continue. We reuse the same message
610 * buffer as ipath_handle_errors() to avoid excessive stack
611 * usage.
612 */
613static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
614 size_t msgl)
615{
616 ipath_err_t hwerrs;
617 u32 bits, ctrl;
618 int isfatal = 0;
619 char bitsmsg[64];
620 int log_idx;
621
622 hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
623 if (!hwerrs) {
624 /*
625 * Better than printing confusing messages: this seems to be
626 * related to clearing the CRC error, or the PLL error,
627 * during init.
628 */
629 ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
630 goto bail;
631 } else if (hwerrs == ~0ULL) {
632 ipath_dev_err(dd, "Read of hardware error status failed "
633 "(all bits set); ignoring\n");
634 goto bail;
635 }
636 ipath_stats.sps_hwerrs++;
637
638 /*
639 * Always clear the error status register, except MEMBISTFAIL,
640 * regardless of whether we continue or stop using the chip.
641 * We want that set so we know it failed, even across driver reload.
642 * We'll still ignore it in the hwerrmask. We do this partly for
643 * diagnostics, but also for support.
644 */
645 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
646 hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
647
648 hwerrs &= dd->ipath_hwerrmask;
649
650 /* We log some errors to EEPROM, check if we have any of those. */
651 for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
652 if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
653 ipath_inc_eeprom_err(dd, log_idx, 1);
654 /*
655 * Make sure we get this much out, unless told to be quiet,
656 * or it's occurred within the last 5 seconds.
657 */
658 if ((hwerrs & ~(dd->ipath_lasthwerror |
659 ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
660 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
661 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) ||
662 (ipath_debug & __IPATH_VERBDBG))
663 dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
664 "(cleared)\n", (unsigned long long) hwerrs);
665 dd->ipath_lasthwerror |= hwerrs;
666
667 if (hwerrs & ~dd->ipath_hwe_bitsextant)
668 ipath_dev_err(dd, "hwerror interrupt with unknown errors "
669 "%llx set\n", (unsigned long long)
670 (hwerrs & ~dd->ipath_hwe_bitsextant));
671
672 if (hwerrs & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR)
673 ipath_sd7220_clr_ibpar(dd);
674
675 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
676 if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
677 /*
678 * Parity errors in send memory are recoverable,
679 * just cancel the send (if indicated in sendbuffererror),
680 * count the occurrence, unfreeze (if no other handled
681 * hardware error bits are set), and continue.
682 */
683 if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
684 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
685 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
686 ipath_7220_txe_recover(dd);
687 hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
688 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
689 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
690 if (!hwerrs) {
691 /* else leave in freeze mode */
692 ipath_write_kreg(dd,
693 dd->ipath_kregs->kr_control,
694 dd->ipath_control);
695 goto bail;
696 }
697 }
698 if (hwerrs) {
699 /*
700 * If any bits that we aren't ignoring are set, only make
701 * the complaint once, in case it's stuck or recurring,
702 * and we get here multiple times.
703 * Force the link down, so the switch knows, and the
704 * LEDs are turned off.
705 */
706 if (dd->ipath_flags & IPATH_INITTED) {
707 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
708 ipath_setup_7220_setextled(dd,
709 INFINIPATH_IBCS_L_STATE_DOWN,
710 INFINIPATH_IBCS_LT_STATE_DISABLED);
711 ipath_dev_err(dd, "Fatal Hardware Error "
712 "(freeze mode), no longer"
713 " usable, SN %.16s\n",
714 dd->ipath_serial);
715 isfatal = 1;
716 }
717 /*
718 * Mark as having had an error for driver, and also
719 * for /sys and status word mapped to user programs.
720 * This marks unit as not usable, until reset.
721 */
722 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
723 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
724 dd->ipath_flags &= ~IPATH_INITTED;
725 } else {
726 ipath_dbg("Clearing freezemode on ignored hardware "
727 "error\n");
728 ipath_clear_freeze(dd);
729 }
730 }
731
732 *msg = '\0';
733
734 if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
735 strlcat(msg, "[Memory BIST test failed, "
736 "InfiniPath hardware unusable]", msgl);
737 /* ignore from now on, so disable until driver reloaded */
738 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
739 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
740 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
741 dd->ipath_hwerrmask);
742 }
743
744 ipath_format_hwerrors(hwerrs,
745 ipath_7220_hwerror_msgs,
746 ARRAY_SIZE(ipath_7220_hwerror_msgs),
747 msg, msgl);
748
749 if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
750 << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
751 bits = (u32) ((hwerrs >>
752 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
753 INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
754 snprintf(bitsmsg, sizeof bitsmsg,
755 "[PCIe Mem Parity Errs %x] ", bits);
756 strlcat(msg, bitsmsg, msgl);
757 }
758
759#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
760 INFINIPATH_HWE_COREPLL_RFSLIP)
761
762 if (hwerrs & _IPATH_PLL_FAIL) {
763 snprintf(bitsmsg, sizeof bitsmsg,
764 "[PLL failed (%llx), InfiniPath hardware unusable]",
765 (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
766 strlcat(msg, bitsmsg, msgl);
767 /* ignore from now on, so disable until driver reloaded */
768 dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
769 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
770 dd->ipath_hwerrmask);
771 }
772
773 if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
774 /*
775 * If it occurs, it is left masked since the external
776 * interface is unused.
777 */
778 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
779 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
780 dd->ipath_hwerrmask);
781 }
782
783 ipath_dev_err(dd, "%s hardware error\n", msg);
784 /*
785 * For the /sys status file. If no trailing } is copied, we'll
786 * know it was truncated.
787 */
788 if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
789 snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
790 "{%s}", msg);
791bail:;
792}
793
794/**
795 * ipath_7220_boardname - fill in the board name
796 * @dd: the infinipath device
797 * @name: the output buffer
798 * @namelen: the size of the output buffer
799 *
800 * info is based on the board revision register
801 */
802static int ipath_7220_boardname(struct ipath_devdata *dd, char *name,
803 size_t namelen)
804{
805 char *n = NULL;
806 u8 boardrev = dd->ipath_boardrev;
807 int ret;
808
809 if (boardrev == 15) {
810 /*
811 * Emulator sometimes comes up all-ones, rather than zero.
812 */
813 boardrev = 0;
814 dd->ipath_boardrev = boardrev;
815 }
816 switch (boardrev) {
817 case 0:
818 n = "InfiniPath_7220_Emulation";
819 break;
820 case 1:
821 n = "InfiniPath_QLE7240";
822 break;
823 case 2:
824 n = "InfiniPath_QLE7280";
825 break;
826 case 3:
827 n = "InfiniPath_QLE7242";
828 break;
829 case 4:
830 n = "InfiniPath_QEM7240";
831 break;
832 case 5:
833 n = "InfiniPath_QMI7240";
834 break;
835 case 6:
836 n = "InfiniPath_QMI7264";
837 break;
838 case 7:
839 n = "InfiniPath_QMH7240";
840 break;
841 case 8:
842 n = "InfiniPath_QME7240";
843 break;
844 case 9:
845 n = "InfiniPath_QLE7250";
846 break;
847 case 10:
848 n = "InfiniPath_QLE7290";
849 break;
850 case 11:
851 n = "InfiniPath_QEM7250";
852 break;
853 case 12:
854 n = "InfiniPath_QLE-Bringup";
855 break;
856 default:
857 ipath_dev_err(dd,
858 "Don't yet know about board with ID %u\n",
859 boardrev);
860 snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
861 boardrev);
862 break;
863 }
864 if (n)
865 snprintf(name, namelen, "%s", n);
866
867 if (dd->ipath_majrev != 5 || !dd->ipath_minrev ||
868 dd->ipath_minrev > 2) {
869 ipath_dev_err(dd, "Unsupported InfiniPath hardware "
870 "revision %u.%u!\n",
871 dd->ipath_majrev, dd->ipath_minrev);
872 ret = 1;
873 } else if (dd->ipath_minrev == 1) {
874 /* Rev1 chips are prototypes. Complain, but allow use */
875 ipath_dev_err(dd, "Unsupported hardware "
876 "revision %u.%u, Contact support@qlogic.com\n",
877 dd->ipath_majrev, dd->ipath_minrev);
878 ret = 0;
879 } else
880 ret = 0;
881
882 /*
883 * Set here not in ipath_init_*_funcs because we have to do
884 * it after we can read chip registers.
885 */
886 dd->ipath_ureg_align = 0x10000; /* 64KB alignment */
887
888 return ret;
889}
890
891/**
892 * ipath_7220_init_hwerrors - enable hardware errors
893 * @dd: the infinipath device
894 *
895 * Now that we have finished initializing everything that might reasonably
896 * cause a hardware error, and cleared those error bits as they occur,
897 * we can enable hardware errors in the mask (potentially enabling
898 * freeze mode), and enable hardware errors as errors (along with
899 * everything else) in errormask.
900 */
901static void ipath_7220_init_hwerrors(struct ipath_devdata *dd)
902{
903 ipath_err_t val;
904 u64 extsval;
905
906 extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
907
908 if (!(extsval & (INFINIPATH_EXTS_MEMBIST_ENDTEST |
909 INFINIPATH_EXTS_MEMBIST_DISABLED)))
910 ipath_dev_err(dd, "MemBIST did not complete!\n");
911 if (extsval & INFINIPATH_EXTS_MEMBIST_DISABLED)
912 dev_info(&dd->pcidev->dev, "MemBIST is disabled.\n");
913
914 val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */
915
916 if (!dd->ipath_boardrev) /* no PLL for Emulator */
917 val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
918
919 if (dd->ipath_minrev == 1)
920 val &= ~(1ULL << 42); /* TXE LaunchFIFO Parity rev1 issue */
921
922 val &= ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
923 dd->ipath_hwerrmask = val;
924
925 /*
926 * special trigger "error" is for debugging purposes. It
927 * works around a processor/chipset problem. The error
928 * interrupt allows us to count occurrences, but we don't
929 * want to pay the overhead for normal use. Emulation only
930 */
931 if (!dd->ipath_boardrev)
932 dd->ipath_maskederrs = INFINIPATH_E_SENDSPECIALTRIGGER;
933}
934
935/*
936 * All detailed interaction with the SerDes has been moved to ipath_sd7220.c
937 *
938 * The portion of IBA7220-specific bringup_serdes() that actually deals with
939 * registers and memory within the SerDes itself is ipath_sd7220_init().
940 */
941
942/**
943 * ipath_7220_bringup_serdes - bring up the serdes
944 * @dd: the infinipath device
945 */
946static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
947{
948 int ret = 0;
949 u64 val, prev_val, guid;
950 int was_reset; /* Note whether uC was reset */
951
952 ipath_dbg("Trying to bringup serdes\n");
953
954 if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
955 INFINIPATH_HWE_SERDESPLLFAILED) {
956 ipath_dbg("At start, serdes PLL failed bit set "
957 "in hwerrstatus, clearing and continuing\n");
958 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
959 INFINIPATH_HWE_SERDESPLLFAILED);
960 }
961
962 if (!dd->ipath_ibcddrctrl) {
963 /* not on re-init after reset */
964 dd->ipath_ibcddrctrl =
965 ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrctrl);
966
967 if (dd->ipath_link_speed_enabled ==
968 (IPATH_IB_SDR | IPATH_IB_DDR))
969 dd->ipath_ibcddrctrl |=
970 IBA7220_IBC_SPEED_AUTONEG_MASK |
971 IBA7220_IBC_IBTA_1_2_MASK;
972 else
973 dd->ipath_ibcddrctrl |=
974 dd->ipath_link_speed_enabled == IPATH_IB_DDR
975 ? IBA7220_IBC_SPEED_DDR :
976 IBA7220_IBC_SPEED_SDR;
977 if ((dd->ipath_link_width_enabled & (IB_WIDTH_1X |
978 IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X))
979 dd->ipath_ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
980 else
981 dd->ipath_ibcddrctrl |=
982 dd->ipath_link_width_enabled == IB_WIDTH_4X
983 ? IBA7220_IBC_WIDTH_4X_ONLY :
984 IBA7220_IBC_WIDTH_1X_ONLY;
985
986 /* always enable these on driver reload, not sticky */
987 dd->ipath_ibcddrctrl |=
988 IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
989 dd->ipath_ibcddrctrl |=
990 IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
991 /*
992 * automatic lane reversal detection for receive
993 * doesn't work correctly in rev 1, so disable it
994 * on that rev, otherwise enable (disabling not
995 * sticky across reload for >rev1)
996 */
997 if (dd->ipath_minrev == 1)
998 dd->ipath_ibcddrctrl &=
999 ~IBA7220_IBC_LANE_REV_SUPPORTED;
1000 else
1001 dd->ipath_ibcddrctrl |=
1002 IBA7220_IBC_LANE_REV_SUPPORTED;
1003 }
1004
1005 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
1006 dd->ipath_ibcddrctrl);
1007
1008 ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl), 0Ull);
1009
1010 /* IBA7220 has SERDES MPU reset in D0 of what _was_ IBPLLCfg */
1011 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
1012 /* remember if uC was in Reset or not, for dactrim */
1013 was_reset = (val & 1);
1014 ipath_cdbg(VERBOSE, "IBReset %s xgxsconfig %llx\n",
1015 was_reset ? "Asserted" : "Negated", (unsigned long long)
1016 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
1017
1018 if (dd->ipath_boardrev) {
1019 /*
1020 * Hardware is not emulator, and may have been reset. Init it.
1021 * Below will release reset, but needs to know if chip was
1022 * originally in reset, to only trim DACs on first time
1023 * after chip reset or powercycle (not driver reload)
1024 */
1025 ret = ipath_sd7220_init(dd, was_reset);
1026 }
1027
1028 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
1029 prev_val = val;
1030 val |= INFINIPATH_XGXS_FC_SAFE;
1031 if (val != prev_val) {
1032 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
1033 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1034 }
1035 if (val & INFINIPATH_XGXS_RESET)
1036 val &= ~INFINIPATH_XGXS_RESET;
1037 if (val != prev_val)
1038 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
1039
1040 ipath_cdbg(VERBOSE, "done: xgxs=%llx from %llx\n",
1041 (unsigned long long)
1042 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig),
1043 prev_val);
1044
1045 guid = be64_to_cpu(dd->ipath_guid);
1046
1047 if (!guid) {
1048 /* have to have something, so use likely unique tsc */
1049 guid = get_cycles();
1050 ipath_dbg("No GUID for heartbeat, faking %llx\n",
1051 (unsigned long long)guid);
1052 } else
1053 ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n", guid);
1054 ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid);
1055 return ret;
1056}
1057
1058static void ipath_7220_config_jint(struct ipath_devdata *dd,
1059 u16 idle_ticks, u16 max_packets)
1060{
1061
1062 /*
1063 * We can request a receive interrupt for 1 or more packets
1064 * from current offset.
1065 */
1066 if (idle_ticks == 0 || max_packets == 0)
1067 /* interrupt after one packet if no mitigation */
1068 dd->ipath_rhdrhead_intr_off =
1069 1ULL << IBA7220_HDRHEAD_PKTINT_SHIFT;
1070 else
1071 /* Turn off RcvHdrHead interrupts if using mitigation */
1072 dd->ipath_rhdrhead_intr_off = 0ULL;
1073
1074 /* refresh kernel RcvHdrHead registers... */
1075 ipath_write_ureg(dd, ur_rcvhdrhead,
1076 dd->ipath_rhdrhead_intr_off |
1077 dd->ipath_pd[0]->port_head, 0);
1078
1079 dd->ipath_jint_max_packets = max_packets;
1080 dd->ipath_jint_idle_ticks = idle_ticks;
1081 ipath_write_kreg(dd, dd->ipath_kregs->kr_jintreload,
1082 ((u64) max_packets << INFINIPATH_JINT_PACKETSHIFT) |
1083 idle_ticks);
1084}
1085
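For concreteness, a worked value for the JIntReload write above (the numbers are arbitrary): with max_packets = 8 and idle_ticks = 100,

    u64 jreload = ((u64) 8 << INFINIPATH_JINT_PACKETSHIFT) | 100;  /* 0x80064 */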
1086/**
1087 * ipath_7220_quiet_serdes - set serdes to txidle
1088 * @dd: the infinipath device
1089 * Called when driver is being unloaded
1090 */
1091static void ipath_7220_quiet_serdes(struct ipath_devdata *dd)
1092{
1093 u64 val;
1094 dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
1095 wake_up(&dd->ipath_autoneg_wait);
1096 cancel_delayed_work(&dd->ipath_autoneg_work);
1097 flush_scheduled_work();
1098 ipath_shutdown_relock_poll(dd);
1099 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
1100 val |= INFINIPATH_XGXS_RESET;
1101 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
1102}
1103
1104static int ipath_7220_intconfig(struct ipath_devdata *dd)
1105{
1106 ipath_7220_config_jint(dd, dd->ipath_jint_idle_ticks,
1107 dd->ipath_jint_max_packets);
1108 return 0;
1109}
1110
1111/**
1112 * ipath_setup_7220_setextled - set the state of the two external LEDs
1113 * @dd: the infinipath device
1114 * @lst: the L state
1115 * @ltst: the LT state
1116 *
1117 * These LEDs indicate the physical and logical state of IB link.
1118 * For this chip (at least with recommended board pinouts), LED1
1119 * is Yellow (logical state) and LED2 is Green (physical state).
1120 *
1121 * Note: We try to match the Mellanox HCA LED behavior as best
1122 * we can. Green indicates physical link state is OK (something is
1123 * plugged in, and we can train).
1124 * Amber indicates the link is logically up (ACTIVE).
1125 * Mellanox further blinks the amber LED to indicate data packet
1126 * activity, but we have no hardware support for that, so it would
1127 * require waking up every 10-20 msecs and checking the counters
1128 * on the chip, and then turning the LED off if appropriate. That's
1129 * visible overhead, so not something we will do.
1130 *
1131 */
1132static void ipath_setup_7220_setextled(struct ipath_devdata *dd, u64 lst,
1133 u64 ltst)
1134{
1135 u64 extctl, ledblink = 0;
1136 unsigned long flags = 0;
1137
1138 /* the diags use the LED to indicate diag info, so we leave
1139 * the external LED alone when the diags are running */
1140 if (ipath_diag_inuse)
1141 return;
1142
1143 /* Allow override of LED display for, e.g. Locating system in rack */
1144 if (dd->ipath_led_override) {
1145 ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
1146 ? INFINIPATH_IBCS_LT_STATE_LINKUP
1147 : INFINIPATH_IBCS_LT_STATE_DISABLED;
1148 lst = (dd->ipath_led_override & IPATH_LED_LOG)
1149 ? INFINIPATH_IBCS_L_STATE_ACTIVE
1150 : INFINIPATH_IBCS_L_STATE_DOWN;
1151 }
1152
1153 spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
1154 extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
1155 INFINIPATH_EXTC_LED2PRIPORT_ON);
1156 if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP) {
1157 extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
1158 /*
1159 * counts are in chip clock (4ns) periods.
1160 * This is 1/16 sec (66.6ms) on,
1161 * 3/16 sec (187.5 ms) off, with packets rcvd
1162 */
1163 ledblink = ((66600*1000UL/4) << IBA7220_LEDBLINK_ON_SHIFT)
1164 | ((187500*1000UL/4) << IBA7220_LEDBLINK_OFF_SHIFT);
1165 }
1166 if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
1167 extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
1168 dd->ipath_extctrl = extctl;
1169 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
1170 spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
1171
1172 if (ledblink) /* blink the LED on packet receive */
1173 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvpktledcnt,
1174 ledblink);
1175}
1176
1177/*
1178 * Similar to pci_intx(pdev, 1), except that we make sure
1179 * msi is off...
1180 */
1181static void ipath_enable_intx(struct pci_dev *pdev)
1182{
1183 u16 cw, new;
1184 int pos;
1185
1186 /* first, turn on INTx */
1187 pci_read_config_word(pdev, PCI_COMMAND, &cw);
1188 new = cw & ~PCI_COMMAND_INTX_DISABLE;
1189 if (new != cw)
1190 pci_write_config_word(pdev, PCI_COMMAND, new);
1191
1192 /* then turn off MSI */
1193 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
1194 if (pos) {
1195 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
1196 new = cw & ~PCI_MSI_FLAGS_ENABLE;
1197 if (new != cw)
1198 pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
1199 }
1200}
1201
1202static int ipath_msi_enabled(struct pci_dev *pdev)
1203{
1204 int pos, ret = 0;
1205
1206 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
1207 if (pos) {
1208 u16 cw;
1209
1210 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
1211 ret = !!(cw & PCI_MSI_FLAGS_ENABLE);
1212 }
1213 return ret;
1214}
1215
1216/*
1217 * Disable the MSI interrupt if enabled, and clear the flag.
1218 * The flag is used primarily for the fallback to INTx, but
1219 * is also used in reinit after reset.
1220 */
1221static void ipath_7220_nomsi(struct ipath_devdata *dd)
1222{
1223 dd->ipath_msi_lo = 0;
1224#ifdef CONFIG_PCI_MSI
1225 if (ipath_msi_enabled(dd->pcidev)) {
1226 /*
1227 * free, but don't zero; later kernels require
1228 * it be freed before disable_msi, so the intx
1229 * setup has to request it again.
1230 */
1231 if (dd->ipath_irq)
1232 free_irq(dd->ipath_irq, dd);
1233 pci_disable_msi(dd->pcidev);
1234 }
1235#endif
1236}
1237
1238/*
1239 * ipath_setup_7220_cleanup - clean up any chip-specific stuff
1240 * @dd: the infinipath device
1241 *
1242 * Nothing but msi interrupt cleanup for now.
1243 *
1244 * This is called during driver unload.
1245 */
1246static void ipath_setup_7220_cleanup(struct ipath_devdata *dd)
1247{
1248 ipath_7220_nomsi(dd);
1249}
1250
1251
1252static void ipath_7220_pcie_params(struct ipath_devdata *dd, u32 boardrev)
1253{
1254 u16 linkstat, minwidth, speed;
1255 int pos;
1256
1257 pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
1258 if (!pos) {
1259 ipath_dev_err(dd, "Can't find PCI Express capability!\n");
1260 goto bail;
1261 }
1262
1263 pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
1264 &linkstat);
1265 /*
1266 * link speed is in bits 0-3, link width in bits 4-8;
1267 * there are no defines for them in the headers
1268 */
1269 speed = linkstat & 0xf;
1270 linkstat >>= 4;
1271 linkstat &= 0x1f;
1272 dd->ipath_lbus_width = linkstat;
1273 switch (boardrev) {
1274 case 0:
1275 case 2:
1276 case 10:
1277 case 12:
1278 minwidth = 16; /* x16 capable boards */
1279 break;
1280 default:
1281 minwidth = 8; /* x8 capable boards */
1282 break;
1283 }
1284
1285 switch (speed) {
1286 case 1:
1287 dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
1288 break;
1289 case 2:
1290 dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */
1291 break;
1292 default: /* not defined, assume gen1 */
1293 dd->ipath_lbus_speed = 2500;
1294 break;
1295 }
1296
1297 if (linkstat < minwidth)
1298 ipath_dev_err(dd,
1299 "PCIe width %u (x%u HCA), performance "
1300 "reduced\n", linkstat, minwidth);
1301 else
1302 ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x%u HCA)\n",
1303 dd->ipath_lbus_speed, linkstat, minwidth);
1304
1305 if (speed != 1)
1306 ipath_dev_err(dd,
1307 "PCIe linkspeed %u is incorrect; "
1308 "should be 1 (2500)!\n", speed);
1309
1310bail:
1311 /* fill in string, even on errors */
1312 snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
1313 "PCIe,%uMHz,x%u\n",
1314 dd->ipath_lbus_speed,
1315 dd->ipath_lbus_width);
1316 return;
1317}
1318
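Later kernels added defines for the Link Status fields decoded above; with them the same extraction would read as below (a sketch, not this driver's code):

    u16 speed = linkstat & PCI_EXP_LNKSTA_CLS;      /* current speed, bits 0-3 */
    u16 width = (linkstat & PCI_EXP_LNKSTA_NLW) >>
            PCI_EXP_LNKSTA_NLW_SHIFT;               /* negotiated width */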
1319
1320/**
1321 * ipath_setup_7220_config - setup PCIe config related stuff
1322 * @dd: the infinipath device
1323 * @pdev: the PCI device
1324 *
1325 * The pci_enable_msi() call will fail on systems with MSI quirks
1326 * such as those with AMD8131, even if the device of interest is not
1327 * attached to that device (in the 2.6.13 - 2.6.15 kernels, at least; fixed
1328 * late in 2.6.16).
1329 * All that can be done is to edit the kernel source to remove the quirk
1330 * check until that is fixed.
1331 * We do not need to call enable_msi() for our HyperTransport chip,
1332 * even though it uses MSI, and we want to avoid the quirk warning,
1333 * so we call enable_msi() only for PCIe. If we do end up needing
1334 * pci_enable_msi at some point in the future for HT, we'll move the
1335 * call back into the main init_one code.
1336 * We save the msi lo and hi values, so we can restore them after
1337 * chip reset (the kernel PCI infrastructure doesn't yet handle that
1338 * correctly).
1339 */
1340static int ipath_setup_7220_config(struct ipath_devdata *dd,
1341 struct pci_dev *pdev)
1342{
1343 int pos, ret = -1;
1344 u32 boardrev;
1345
1346 dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
1347#ifdef CONFIG_PCI_MSI
1348 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
1349 if (!strcmp(int_type, "force_msi") || !strcmp(int_type, "auto"))
1350 ret = pci_enable_msi(pdev);
1351 if (ret) {
1352 if (!strcmp(int_type, "force_msi")) {
1353 ipath_dev_err(dd, "pci_enable_msi failed: %d, "
1354 "force_msi is on, so not continuing.\n",
1355 ret);
1356 return ret;
1357 }
1358
1359 ipath_enable_intx(pdev);
1360 if (!strcmp(int_type, "auto"))
1361 ipath_dev_err(dd, "pci_enable_msi failed: %d, "
1362 "falling back to INTx\n", ret);
1363 } else if (pos) {
1364 u16 control;
1365 pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
1366 &dd->ipath_msi_lo);
1367 pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
1368 &dd->ipath_msi_hi);
1369 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS,
1370 &control);
1371 /* now save the data (vector) info */
1372 pci_read_config_word(pdev,
1373 pos + ((control & PCI_MSI_FLAGS_64BIT)
1374 ? PCI_MSI_DATA_64 :
1375 PCI_MSI_DATA_32),
1376 &dd->ipath_msi_data);
1377 } else
1378 ipath_dev_err(dd, "Can't find MSI capability, "
1379 "can't save MSI settings for reset\n");
1380#else
1381 ipath_dbg("PCI_MSI not configured, using IntX interrupts\n");
1382 ipath_enable_intx(pdev);
1383#endif
1384
1385 dd->ipath_irq = pdev->irq;
1386
1387 /*
1388 * We save the cachelinesize also, although it doesn't
1389 * really matter.
1390 */
1391 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
1392 &dd->ipath_pci_cacheline);
1393
1394 /*
1395 * This function is called early, before ipath_boardrev is set; it's
1396 * too early in init to use ipath_read_kreg64(), so use readq().
1397 */
1398 boardrev = (readq(&dd->ipath_kregbase[dd->ipath_kregs->kr_revision])
1399 >> INFINIPATH_R_BOARDID_SHIFT) & INFINIPATH_R_BOARDID_MASK;
1400
1401 ipath_7220_pcie_params(dd, boardrev);
1402
1403 dd->ipath_flags |= IPATH_NODMA_RTAIL | IPATH_HAS_SEND_DMA |
1404 IPATH_HAS_PBC_CNT | IPATH_HAS_THRESH_UPDATE;
1405 dd->ipath_pioupd_thresh = 4U; /* set default update threshold */
1406 return 0;
1407}
1408
1409static void ipath_init_7220_variables(struct ipath_devdata *dd)
1410{
1411 /*
1412 * setup the register offsets, since they are different for each
1413 * chip
1414 */
1415 dd->ipath_kregs = &ipath_7220_kregs;
1416 dd->ipath_cregs = &ipath_7220_cregs;
1417
1418 /*
1419 * bits for selecting i2c direction and values,
1420 * used for I2C serial flash
1421 */
1422 dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
1423 dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
1424 dd->ipath_gpio_sda = IPATH_GPIO_SDA;
1425 dd->ipath_gpio_scl = IPATH_GPIO_SCL;
1426
1427 /*
1428 * Fill in data for field-values that change in IBA7220.
1429 * We dynamically specify only the mask for LINKTRAININGSTATE
1430 * and only the shift for LINKSTATE, as they are the only ones
1431 * that change. Also precalculate the 3 link states of interest
1432 * and the combined mask.
1433 */
1434 dd->ibcs_ls_shift = IBA7220_IBCS_LINKSTATE_SHIFT;
1435 dd->ibcs_lts_mask = IBA7220_IBCS_LINKTRAININGSTATE_MASK;
1436 dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
1437 dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
1438 dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1439 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1440 (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
1441 dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1442 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1443 (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
1444 dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1445 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1446 (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
1447
1448 /*
1449 * Fill in data for ibcc field-values that change in IBA7220.
1450 * We dynamically specify only the mask for LINKINITCMD
1451 * and only the shift for LINKCMD and MAXPKTLEN, as they are
1452 * the only ones that change.
1453 */
1454 dd->ibcc_lic_mask = IBA7220_IBCC_LINKINITCMD_MASK;
1455 dd->ibcc_lc_shift = IBA7220_IBCC_LINKCMD_SHIFT;
1456 dd->ibcc_mpl_shift = IBA7220_IBCC_MAXPKTLEN_SHIFT;
1457
1458 /* Fill in shifts for RcvCtrl. */
1459 dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
1460 dd->ipath_r_intravail_shift = IBA7220_R_INTRAVAIL_SHIFT;
1461 dd->ipath_r_tailupd_shift = IBA7220_R_TAILUPD_SHIFT;
1462 dd->ipath_r_portcfg_shift = IBA7220_R_PORTCFG_SHIFT;
1463
1464 /* variables for sanity checking interrupt and errors */
1465 dd->ipath_hwe_bitsextant =
1466 (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
1467 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
1468 (INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
1469 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
1470 (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
1471 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
1472 INFINIPATH_HWE_PCIE1PLLFAILED |
1473 INFINIPATH_HWE_PCIE0PLLFAILED |
1474 INFINIPATH_HWE_PCIEPOISONEDTLP |
1475 INFINIPATH_HWE_PCIECPLTIMEOUT |
1476 INFINIPATH_HWE_PCIEBUSPARITYXTLH |
1477 INFINIPATH_HWE_PCIEBUSPARITYXADM |
1478 INFINIPATH_HWE_PCIEBUSPARITYRADM |
1479 INFINIPATH_HWE_MEMBISTFAILED |
1480 INFINIPATH_HWE_COREPLL_FBSLIP |
1481 INFINIPATH_HWE_COREPLL_RFSLIP |
1482 INFINIPATH_HWE_SERDESPLLFAILED |
1483 INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
1484 INFINIPATH_HWE_IBCBUSFRSPCPARITYERR |
1485 INFINIPATH_HWE_PCIECPLDATAQUEUEERR |
1486 INFINIPATH_HWE_PCIECPLHDRQUEUEERR |
1487 INFINIPATH_HWE_SDMAMEMREADERR |
1488 INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED |
1489 INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT |
1490 INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT |
1491 INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT |
1492 INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT |
1493 INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR |
1494 INFINIPATH_HWE_IB_UC_MEMORYPARITYERR |
1495 INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR |
1496 INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR;
1497 dd->ipath_i_bitsextant =
1498 INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED |
1499 (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
1500 (INFINIPATH_I_RCVAVAIL_MASK <<
1501 INFINIPATH_I_RCVAVAIL_SHIFT) |
1502 INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
1503 INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO |
1504 INFINIPATH_I_JINT | INFINIPATH_I_SERDESTRIMDONE;
1505 dd->ipath_e_bitsextant =
1506 INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
1507 INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
1508 INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
1509 INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
1510 INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
1511 INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
1512 INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
1513 INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
1514 INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
1515 INFINIPATH_E_SENDSPECIALTRIGGER |
1516 INFINIPATH_E_SDMADISABLED | INFINIPATH_E_SMINPKTLEN |
1517 INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNDERRUN |
1518 INFINIPATH_E_SPKTLEN | INFINIPATH_E_SDROPPEDSMPPKT |
1519 INFINIPATH_E_SDROPPEDDATAPKT |
1520 INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
1521 INFINIPATH_E_SUNSUPVL | INFINIPATH_E_SENDBUFMISUSE |
1522 INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND |
1523 INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE |
1524 INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG |
1525 INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW |
1526 INFINIPATH_E_SDMAUNEXPDATA |
1527 INFINIPATH_E_IBSTATUSCHANGED | INFINIPATH_E_INVALIDADDR |
1528 INFINIPATH_E_RESET | INFINIPATH_E_HARDWARE |
1529 INFINIPATH_E_SDMADESCADDRMISALIGN |
1530 INFINIPATH_E_INVALIDEEPCMD;
1531
1532 dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
1533 dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
1534 dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
1535 dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
1536 dd->ipath_flags |= IPATH_INTREG_64 | IPATH_HAS_MULT_IB_SPEED
1537 | IPATH_HAS_LINK_LATENCY;
1538
1539 /*
1540 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
1541	 * 2 is some miscellaneous errors, and 3 is reserved for future use.
1542 */
1543 dd->ipath_eep_st_masks[0].hwerrs_to_log =
1544 INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
1545 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
1546
1547 dd->ipath_eep_st_masks[1].hwerrs_to_log =
1548 INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
1549 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
1550
1551 dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
1552
1553 ipath_linkrecovery = 0;
1554
1555 init_waitqueue_head(&dd->ipath_autoneg_wait);
1556 INIT_DELAYED_WORK(&dd->ipath_autoneg_work, autoneg_work);
1557
1558 dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
1559 dd->ipath_link_speed_supported = IPATH_IB_SDR | IPATH_IB_DDR;
1560
1561 dd->ipath_link_width_enabled = dd->ipath_link_width_supported;
1562 dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
1563 /*
1564	 * set the initial values to a reasonable default; they will be set
1565	 * for real when the link is up.
1566 */
1567 dd->ipath_link_width_active = IB_WIDTH_4X;
1568 dd->ipath_link_speed_active = IPATH_IB_SDR;
1569 dd->delay_mult = rate_to_delay[0][1];
1570}
1571
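
The three precomputed IBC status values above exist so that later "is the link in state X" checks become a single mask-and-compare instead of repeated shifting. A minimal userspace sketch of the same pattern, using illustrative shift and mask values rather than the chip's real ones:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real shifts/masks are chip-specific. */
#define LS_SHIFT	7		/* link state field position */
#define LS_MASK		0x7ull		/* link state field, right-justified */
#define LTS_MASK	0x3full		/* link training state, already shifted */

#define LT_LINKUP	0x11ull		/* training state: link up */
#define L_ACTIVE	0x4ull		/* logical state: active */

int main(void)
{
	/* precompute once, as ipath_init_7220_variables() does */
	uint64_t ibcs_mask = (LS_MASK << LS_SHIFT) | LTS_MASK;
	uint64_t ib_active = LT_LINKUP | (L_ACTIVE << LS_SHIFT);
	uint64_t ibcs = ib_active;	/* simulate a register read of IBCStatus */

	/* one AND and one compare decides "link fully active?" */
	printf("active: %d\n", (ibcs & ibcs_mask) == ib_active);
	return 0;
}
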
1572
1573/*
1574 * Set up the MSI stuff again after a reset. I'd like to just call
1575 * pci_enable_msi() and request_irq() again, but when I do that,
1576 * the MSI enable bit doesn't get set in the command word, and
1577 * we switch to a different interrupt vector, which is confusing,
1578 * so I instead just do it all inline. Perhaps we can somehow tie this
1579 * into the PCIe hotplug support at some point.
1580 * Note, because I'm doing it all here, I don't call pci_disable_msi()
1581 * or free_irq() at the start of ipath_setup_7220_reset().
1582 */
1583static int ipath_reinit_msi(struct ipath_devdata *dd)
1584{
1585 int ret = 0;
1586#ifdef CONFIG_PCI_MSI
1587 int pos;
1588 u16 control;
1589 if (!dd->ipath_msi_lo) /* Using intX, or init problem */
1590 goto bail;
1591
1592 pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
1593 if (!pos) {
1594 ipath_dev_err(dd, "Can't find MSI capability, "
1595 "can't restore MSI settings\n");
1596 goto bail;
1597 }
1598 ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
1599 dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
1600 pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
1601 dd->ipath_msi_lo);
1602 ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
1603 dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
1604 pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
1605 dd->ipath_msi_hi);
1606 pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
1607 if (!(control & PCI_MSI_FLAGS_ENABLE)) {
1608 ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
1609 "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
1610 control, control | PCI_MSI_FLAGS_ENABLE);
1611 control |= PCI_MSI_FLAGS_ENABLE;
1612 pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
1613 control);
1614 }
1615 /* now rewrite the data (vector) info */
1616 pci_write_config_word(dd->pcidev, pos +
1617 ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
1618 dd->ipath_msi_data);
1619 ret = 1;
1620bail:
1621#endif
1622 if (!ret) {
1623 ipath_dbg("Using IntX, MSI disabled or not configured\n");
1624 ipath_enable_intx(dd->pcidev);
1625 ret = 1;
1626 }
1627 /*
1628 * We restore the cachelinesize also, although it doesn't really
1629 * matter.
1630 */
1631 pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
1632 dd->ipath_pci_cacheline);
1633 /* and now set the pci master bit again */
1634 pci_set_master(dd->pcidev);
1635
1636 return ret;
1637}
1638
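
One subtle point in the inline restore above: the MSI message-data register lives at a different config-space offset depending on whether the capability advertises 64-bit message addresses, which is what the `(control & PCI_MSI_FLAGS_64BIT) ? 12 : 8` expression encodes. A standalone sketch of just that offset rule (the flag value mirrors the standard PCI MSI capability layout):

#include <stdint.h>
#include <stdio.h>

#define PCI_MSI_FLAGS_64BIT 0x0080	/* capability supports 64-bit addresses */

/* Offset of the message-data register from the MSI capability base:
 * a 64-bit capable function has addr-lo (4) + addr-hi (4) after the
 * 4-byte capability header; a 32-bit one has only addr-lo.
 */
static unsigned msi_data_offset(uint16_t control)
{
	return (control & PCI_MSI_FLAGS_64BIT) ? 12 : 8;
}

int main(void)
{
	printf("32-bit MSI data at +%u, 64-bit at +%u\n",
	       msi_data_offset(0), msi_data_offset(PCI_MSI_FLAGS_64BIT));
	return 0;
}
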
1639/*
1640 * This routine sleeps, so it can only be called from user context, not
1641 * from interrupt context. If we need interrupt context, we can split
1642 * it into two routines.
1643 */
1644static int ipath_setup_7220_reset(struct ipath_devdata *dd)
1645{
1646 u64 val;
1647 int i;
1648 int ret;
1649 u16 cmdval;
1650
1651 pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);
1652
1653 /* Use dev_err so it shows up in logs, etc. */
1654 ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
1655
1656 /* keep chip from being accessed in a few places */
1657 dd->ipath_flags &= ~(IPATH_INITTED | IPATH_PRESENT);
1658 val = dd->ipath_control | INFINIPATH_C_RESET;
1659 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
1660 mb();
1661
1662 for (i = 1; i <= 5; i++) {
1663 int r;
1664
1665 /*
1666 * Allow MBIST, etc. to complete; longer on each retry.
1667 * We sometimes get machine checks from bus timeout if no
1668 * response, so for now, make it *really* long.
1669 */
1670 msleep(1000 + (1 + i) * 2000);
1671 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
1672 dd->ipath_pcibar0);
1673 if (r)
1674 ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
1675 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
1676 dd->ipath_pcibar1);
1677 if (r)
1678 ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
1679 /* now re-enable memory access */
1680 pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
1681 r = pci_enable_device(dd->pcidev);
1682 if (r)
1683 ipath_dev_err(dd, "pci_enable_device failed after "
1684 "reset: %d\n", r);
1685 /*
1686 * whether it fully enabled or not, mark as present,
1687 * again (but not INITTED)
1688 */
1689 dd->ipath_flags |= IPATH_PRESENT;
1690 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
1691 if (val == dd->ipath_revision) {
1692 ipath_cdbg(VERBOSE, "Got matching revision "
1693 "register %llx on try %d\n",
1694 (unsigned long long) val, i);
1695 ret = ipath_reinit_msi(dd);
1696 goto bail;
1697 }
1698 /* Probably getting -1 back */
1699 ipath_dbg("Didn't get expected revision register, "
1700 "got %llx, try %d\n", (unsigned long long) val,
1701 i + 1);
1702 }
1703 ret = 0; /* failed */
1704
1705bail:
1706 if (ret)
1707 ipath_7220_pcie_params(dd, dd->ipath_boardrev);
1708
1709 return ret;
1710}
1711
1712/**
1713 * ipath_7220_put_tid - write a TID to the chip
1714 * @dd: the infinipath device
1715 * @tidptr: pointer to the expected TID (in chip) to update
1716 * @type: 0 for eager, 1 for expected
1717 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1718 *
1719 * This exists as a separate routine to allow for selection of the
1720 * appropriate "flavor". The static calls in cleanup just use the
1721 * revision-agnostic form, as they are not performance critical.
1722 */
1723static void ipath_7220_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
1724 u32 type, unsigned long pa)
1725{
1726 if (pa != dd->ipath_tidinvalid) {
1727 u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
1728
1729 /* paranoia checks */
1730 if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
1731 dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
1732 "not 2KB aligned!\n", pa);
1733 return;
1734 }
1735 if (pa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
1736 ipath_dev_err(dd,
1737 "BUG: Physical page address 0x%lx "
1738 "larger than supported\n", pa);
1739 return;
1740 }
1741
1742 if (type == RCVHQ_RCV_TYPE_EAGER)
1743 chippa |= dd->ipath_tidtemplate;
1744 else /* for now, always full 4KB page */
1745 chippa |= IBA7220_TID_SZ_4K;
1746 writeq(chippa, tidptr);
1747 } else
1748 writeq(pa, tidptr);
1749 mmiowb();
1750}
1751
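
The first "paranoia check" above is the usual shift round-trip test: an address is aligned to 1 << shift exactly when shifting it right and then left by that amount reproduces it. A small standalone sketch, assuming the 2KB alignment (shift of 11) that the message text implies:

#include <stdint.h>
#include <stdio.h>

#define TID_PA_SHIFT 11		/* assumed: 2KB alignment, per the driver message */

static int is_2kb_aligned(unsigned long pa)
{
	unsigned long chippa = pa >> TID_PA_SHIFT;

	/* aligned iff no low bits were lost in the shift */
	return pa == (chippa << TID_PA_SHIFT);
}

int main(void)
{
	/* 0x4000 is 2KB aligned, 0x4100 is not: prints "1 0" */
	printf("%d %d\n", is_2kb_aligned(0x4000), is_2kb_aligned(0x4100));
	return 0;
}
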
1752/**
1753 * ipath_7220_clear_tids - clear all TID entries for a port, expected and eager
1754 * @dd: the infinipath device
1755 * @port: the port
1756 *
1757 * clear all TID entries for a port, expected and eager.
1758 * Used from ipath_close(). On this chip, TIDs are only 32 bits,
1759 * not 64, but they are still on 64 bit boundaries, so tidbase
1760 * is declared as u64 * for the pointer math, even though we write 32 bits
1761 */
1762static void ipath_7220_clear_tids(struct ipath_devdata *dd, unsigned port)
1763{
1764 u64 __iomem *tidbase;
1765 unsigned long tidinv;
1766 int i;
1767
1768 if (!dd->ipath_kregbase)
1769 return;
1770
1771 ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
1772
1773 tidinv = dd->ipath_tidinvalid;
1774 tidbase = (u64 __iomem *)
1775 ((char __iomem *)(dd->ipath_kregbase) +
1776 dd->ipath_rcvtidbase +
1777 port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
1778
1779 for (i = 0; i < dd->ipath_rcvtidcnt; i++)
1780 ipath_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
1781 tidinv);
1782
1783 tidbase = (u64 __iomem *)
1784 ((char __iomem *)(dd->ipath_kregbase) +
1785 dd->ipath_rcvegrbase + port_egrtid_idx(dd, port)
1786 * sizeof(*tidbase));
1787
1788 for (i = port ? dd->ipath_rcvegrcnt : dd->ipath_p0_rcvegrcnt; i; i--)
1789 ipath_7220_put_tid(dd, &tidbase[i-1], RCVHQ_RCV_TYPE_EAGER,
1790 tidinv);
1791}
1792
1793/**
1794 * ipath_7220_tidtemplate - setup constants for TID updates
1795 * @dd: the infinipath device
1796 *
1797 * We set up stuff that we use a lot, to avoid calculating it each time
1798 */
1799static void ipath_7220_tidtemplate(struct ipath_devdata *dd)
1800{
1801 /* For now, we always allocate 4KB buffers (at init) so we can
1802 * receive max size packets. We may want a module parameter to
1803	 * specify 2KB or 4KB and/or make it per port instead of per device
1804 * for those who want to reduce memory footprint. Note that the
1805 * ipath_rcvhdrentsize size must be large enough to hold the largest
1806 * IB header (currently 96 bytes) that we expect to handle (plus of
1807 * course the 2 dwords of RHF).
1808 */
1809 if (dd->ipath_rcvegrbufsize == 2048)
1810 dd->ipath_tidtemplate = IBA7220_TID_SZ_2K;
1811 else if (dd->ipath_rcvegrbufsize == 4096)
1812 dd->ipath_tidtemplate = IBA7220_TID_SZ_4K;
1813 else {
1814 dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
1815 "%u, using %u\n", dd->ipath_rcvegrbufsize,
1816 4096);
1817 dd->ipath_tidtemplate = IBA7220_TID_SZ_4K;
1818 }
1819 dd->ipath_tidinvalid = 0;
1820}
1821
1822static int ipath_7220_early_init(struct ipath_devdata *dd)
1823{
1824 u32 i, s;
1825
1826 if (strcmp(int_type, "auto") &&
1827 strcmp(int_type, "force_msi") &&
1828 strcmp(int_type, "force_intx")) {
1829 ipath_dev_err(dd, "Invalid interrupt_type: '%s', expecting "
1830 "auto, force_msi or force_intx\n", int_type);
1831 return -EINVAL;
1832 }
1833
1834 /*
1835 * Control[4] has been added to change the arbitration within
1836 * the SDMA engine between favoring data fetches over descriptor
1837 * fetches. ipath_sdma_fetch_arb==0 gives data fetches priority.
1838 */
1839 if (ipath_sdma_fetch_arb && (dd->ipath_minrev > 1))
1840 dd->ipath_control |= 1<<4;
1841
1842 dd->ipath_flags |= IPATH_4BYTE_TID;
1843
1844 /*
1845 * For openfabrics, we need to be able to handle an IB header of
1846	 * 24 dwords. The HT chip has arbitrarily sized receive buffers, so we
1847 * made them the same size as the PIO buffers. This chip does not
1848 * handle arbitrary size buffers, so we need the header large enough
1849 * to handle largest IB header, but still have room for a 2KB MTU
1850 * standard IB packet.
1851 */
1852 dd->ipath_rcvhdrentsize = 24;
1853 dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
1854 dd->ipath_rhf_offset =
1855 dd->ipath_rcvhdrentsize - sizeof(u64) / sizeof(u32);
1856
1857 dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
1858 /*
1859 * the min() check here is currently a nop, but it may not always
1860 * be, depending on just how we do ipath_rcvegrbufsize
1861 */
1862 dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
1863 dd->ipath_piosize2k,
1864 dd->ipath_rcvegrbufsize +
1865 (dd->ipath_rcvhdrentsize << 2));
1866 dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
1867
1868 ipath_7220_config_jint(dd, INFINIPATH_JINT_DEFAULT_IDLE_TICKS,
1869 INFINIPATH_JINT_DEFAULT_MAX_PACKETS);
1870
1871 if (dd->ipath_boardrev) /* no eeprom on emulator */
1872 ipath_get_eeprom_info(dd);
1873
1874 /* start of code to check and print procmon */
1875 s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
1876 s &= ~(1U<<31); /* clear done bit */
1877 s |= 1U<<14; /* clear counter (write 1 to clear) */
1878 ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
1879	/* make sure clear_counter stays low long enough before start */
1880 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1881 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1882
1883 s &= ~(1U<<14); /* allow counter to count (before starting) */
1884 ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
1885 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1886 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1887 s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
1888
1889 s |= 1U<<15; /* start the counter */
1890 s &= ~(1U<<31); /* clear done bit */
1891 s &= ~0x7ffU; /* clear frequency bits */
1892 s |= 0xe29; /* set frequency bits, in case cleared */
1893 ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
1894
1895 s = 0;
1896 for (i = 500; i > 0 && !(s&(1ULL<<31)); i--) {
1897 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1898 s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
1899 }
1900 if (!(s&(1U<<31)))
1901 ipath_dev_err(dd, "ProcMon register not valid: 0x%x\n", s);
1902 else
1903 ipath_dbg("ProcMon=0x%x, count=0x%x\n", s, (s>>16)&0x1ff);
1904
1905 return 0;
1906}
1907
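
The ProcMon readout at the end of early_init is a classic bounded poll: kick the counter, then read a limited number of times waiting for the done bit (bit 31) before declaring the value invalid, rather than spinning forever. Sketched in miniature with a fake register (the bit positions follow the code above and are not verified against the hardware documentation):

#include <stdint.h>
#include <stdio.h>

#define DONE_BIT (1u << 31)

static uint32_t fake_reads;	/* pretend hardware: done after 3 reads */

static uint32_t read_procmon(void)
{
	return ++fake_reads >= 3 ? (DONE_BIT | 0xe290123) : 0;
}

int main(void)
{
	uint32_t s = 0;
	int i;

	/* bounded poll: give up after 500 reads instead of hanging */
	for (i = 500; i > 0 && !(s & DONE_BIT); i--)
		s = read_procmon();

	if (!(s & DONE_BIT))
		fprintf(stderr, "ProcMon register not valid: 0x%x\n", s);
	else
		printf("ProcMon=0x%x, count=0x%x\n", s, (s >> 16) & 0x1ff);
	return 0;
}
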
1908/**
1909 * ipath_7220_get_base_info - set chip-specific flags for user code
1910 * @pd: the infinipath port
1911 * @kbase: ipath_base_info pointer
1912 *
1913 * We set the PCIE flag because the lower bandwidth on PCIe vs
1914 * HyperTransport can affect some user packet algorithms.
1915 */
1916static int ipath_7220_get_base_info(struct ipath_portdata *pd, void *kbase)
1917{
1918 struct ipath_base_info *kinfo = kbase;
1919
1920 kinfo->spi_runtime_flags |=
1921 IPATH_RUNTIME_PCIE | IPATH_RUNTIME_NODMA_RTAIL |
1922 IPATH_RUNTIME_SDMA;
1923
1924 return 0;
1925}
1926
1927static void ipath_7220_free_irq(struct ipath_devdata *dd)
1928{
1929 free_irq(dd->ipath_irq, dd);
1930 dd->ipath_irq = 0;
1931}
1932
1933static struct ipath_message_header *
1934ipath_7220_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
1935{
1936 u32 offset = ipath_hdrget_offset(rhf_addr);
1937
1938 return (struct ipath_message_header *)
1939 (rhf_addr - dd->ipath_rhf_offset + offset);
1940}
1941
1942static void ipath_7220_config_ports(struct ipath_devdata *dd, ushort cfgports)
1943{
1944 u32 nchipports;
1945
1946 nchipports = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
1947 if (!cfgports) {
1948 int ncpus = num_online_cpus();
1949
1950 if (ncpus <= 4)
1951 dd->ipath_portcnt = 5;
1952 else if (ncpus <= 8)
1953 dd->ipath_portcnt = 9;
1954 if (dd->ipath_portcnt)
1955 ipath_dbg("Auto-configured for %u ports, %d cpus "
1956 "online\n", dd->ipath_portcnt, ncpus);
1957 } else if (cfgports <= nchipports)
1958 dd->ipath_portcnt = cfgports;
1959 if (!dd->ipath_portcnt) /* none of the above, set to max */
1960 dd->ipath_portcnt = nchipports;
1961 /*
1962	 * chip can be configured for 5, 9, or 17 ports, and the choice
1963	 * affects the number of eager TIDs per port (1K, 2K, 4K).
1964 */
1965 if (dd->ipath_portcnt > 9)
1966 dd->ipath_rcvctrl |= 2ULL << IBA7220_R_PORTCFG_SHIFT;
1967 else if (dd->ipath_portcnt > 5)
1968 dd->ipath_rcvctrl |= 1ULL << IBA7220_R_PORTCFG_SHIFT;
1969 /* else configure for default 5 receive ports */
1970 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1971 dd->ipath_rcvctrl);
1972 dd->ipath_p0_rcvegrcnt = 2048; /* always */
1973 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
1974 dd->ipath_pioreserved = 1; /* reserve a buffer */
1975}
1976
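
Since the chip supports exactly three receive-port configurations (5, 9, or 17 ports) encoded in a two-bit field, the selection logic above reduces to the small mapping sketched below (the encoding follows the code above; the real shift is chip-defined):

#include <stdio.h>

/* Map a configured port count onto the chip's 2-bit PORTCFG encoding:
 * 0 -> 5 ports, 1 -> 9 ports, 2 -> 17 ports. More than 9 needs the
 * 17-port config, more than 5 the 9-port config, else the default.
 */
static unsigned portcfg(unsigned portcnt)
{
	if (portcnt > 9)
		return 2;
	if (portcnt > 5)
		return 1;
	return 0;
}

int main(void)
{
	unsigned n;

	for (n = 5; n <= 17; n += 4)
		printf("%u ports -> PORTCFG %u\n", n, portcfg(n));
	return 0;
}
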
1977
1978static int ipath_7220_get_ib_cfg(struct ipath_devdata *dd, int which)
1979{
1980 int lsb, ret = 0;
1981 u64 maskr; /* right-justified mask */
1982
1983 switch (which) {
1984 case IPATH_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
1985 lsb = IBA7220_IBC_HRTBT_SHIFT;
1986 maskr = IBA7220_IBC_HRTBT_MASK;
1987 break;
1988
1989 case IPATH_IB_CFG_LWID_ENB: /* Get allowed Link-width */
1990 ret = dd->ipath_link_width_enabled;
1991 goto done;
1992
1993 case IPATH_IB_CFG_LWID: /* Get currently active Link-width */
1994 ret = dd->ipath_link_width_active;
1995 goto done;
1996
1997 case IPATH_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
1998 ret = dd->ipath_link_speed_enabled;
1999 goto done;
2000
2001 case IPATH_IB_CFG_SPD: /* Get current Link spd */
2002 ret = dd->ipath_link_speed_active;
2003 goto done;
2004
2005 case IPATH_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
2006 lsb = IBA7220_IBC_RXPOL_SHIFT;
2007 maskr = IBA7220_IBC_RXPOL_MASK;
2008 break;
2009
2010 case IPATH_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
2011 lsb = IBA7220_IBC_LREV_SHIFT;
2012 maskr = IBA7220_IBC_LREV_MASK;
2013 break;
2014
2015 case IPATH_IB_CFG_LINKLATENCY:
2016 ret = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrstatus)
2017 & IBA7220_DDRSTAT_LINKLAT_MASK;
2018 goto done;
2019
2020 default:
2021 ret = -ENOTSUPP;
2022 goto done;
2023 }
2024 ret = (int)((dd->ipath_ibcddrctrl >> lsb) & maskr);
2025done:
2026 return ret;
2027}
2028
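
Every register-backed case above uses the same (lsb, right-justified mask) convention: extract a field with a shift and an AND, and install one by clearing the field and OR-ing in the new value, as ipath_7220_set_ib_cfg below does. A compact standalone sketch of that convention:

#include <stdint.h>
#include <stdio.h>

/* Extract a field described by its LSB position and right-justified mask. */
static uint64_t get_field(uint64_t reg, int lsb, uint64_t maskr)
{
	return (reg >> lsb) & maskr;
}

static uint64_t set_field(uint64_t reg, int lsb, uint64_t maskr, uint64_t val)
{
	reg &= ~(maskr << lsb);		/* clear the old field */
	reg |= (val & maskr) << lsb;	/* install the new value */
	return reg;
}

int main(void)
{
	uint64_t reg = 0;

	reg = set_field(reg, 8, 0x3, 2);	/* illustrative 2-bit field at bit 8 */
	printf("field = %llu\n", (unsigned long long)get_field(reg, 8, 0x3));
	return 0;
}
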
2029static int ipath_7220_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
2030{
2031 int lsb, ret = 0, setforce = 0;
2032 u64 maskr; /* right-justified mask */
2033
2034 switch (which) {
2035 case IPATH_IB_CFG_LIDLMC:
2036 /*
2037 * Set LID and LMC. Combined to avoid possible hazard
2038 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
2039 */
2040 lsb = IBA7220_IBC_DLIDLMC_SHIFT;
2041 maskr = IBA7220_IBC_DLIDLMC_MASK;
2042 break;
2043
2044 case IPATH_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
2045 if (val & IPATH_IB_HRTBT_ON &&
2046 (dd->ipath_flags & IPATH_NO_HRTBT))
2047 goto bail;
2048 lsb = IBA7220_IBC_HRTBT_SHIFT;
2049 maskr = IBA7220_IBC_HRTBT_MASK;
2050 break;
2051
2052 case IPATH_IB_CFG_LWID_ENB: /* set allowed Link-width */
2053 /*
2054 * As with speed, only write the actual register if
2055 * the link is currently down, otherwise takes effect
2056 * on next link change.
2057 */
2058 dd->ipath_link_width_enabled = val;
2059 if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) !=
2060 IPATH_LINKDOWN)
2061 goto bail;
2062 /*
2063 * We set the IPATH_IB_FORCE_NOTIFY bit so updown
2064		 * will get called because we want to update
2065 * link_width_active, and the change may not take
2066 * effect for some time (if we are in POLL), so this
2067 * flag will force the updown routine to be called
2068 * on the next ibstatuschange down interrupt, even
2069		 * if it's not a down->up transition.
2070 */
2071 val--; /* convert from IB to chip */
2072 maskr = IBA7220_IBC_WIDTH_MASK;
2073 lsb = IBA7220_IBC_WIDTH_SHIFT;
2074 setforce = 1;
2075 dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
2076 break;
2077
2078 case IPATH_IB_CFG_SPD_ENB: /* set allowed Link speeds */
2079 /*
2080 * If we turn off IB1.2, need to preset SerDes defaults,
2081 * but not right now. Set a flag for the next time
2082 * we command the link down. As with width, only write the
2083 * actual register if the link is currently down, otherwise
2084 * takes effect on next link change. Since setting is being
2085		 * explicitly requested (via MAD or sysfs), clear autoneg
2086 * failure status if speed autoneg is enabled.
2087 */
2088 dd->ipath_link_speed_enabled = val;
2089 if (dd->ipath_ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK &&
2090 !(val & (val - 1)))
2091 dd->ipath_presets_needed = 1;
2092 if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) !=
2093 IPATH_LINKDOWN)
2094 goto bail;
2095 /*
2096 * We set the IPATH_IB_FORCE_NOTIFY bit so updown
2097		 * will get called because we want to update
2098 * link_speed_active, and the change may not take
2099 * effect for some time (if we are in POLL), so this
2100 * flag will force the updown routine to be called
2101 * on the next ibstatuschange down interrupt, even
2102		 * if it's not a down->up transition. When setting
2103 * speed autoneg, clear AUTONEG_FAILED.
2104 */
2105 if (val == (IPATH_IB_SDR | IPATH_IB_DDR)) {
2106 val = IBA7220_IBC_SPEED_AUTONEG_MASK |
2107 IBA7220_IBC_IBTA_1_2_MASK;
2108 dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED;
2109 } else
2110 val = val == IPATH_IB_DDR ? IBA7220_IBC_SPEED_DDR
2111 : IBA7220_IBC_SPEED_SDR;
2112 maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
2113 IBA7220_IBC_IBTA_1_2_MASK;
2114 lsb = 0; /* speed bits are low bits */
2115 setforce = 1;
2116 break;
2117
2118 case IPATH_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
2119 lsb = IBA7220_IBC_RXPOL_SHIFT;
2120 maskr = IBA7220_IBC_RXPOL_MASK;
2121 break;
2122
2123 case IPATH_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
2124 lsb = IBA7220_IBC_LREV_SHIFT;
2125 maskr = IBA7220_IBC_LREV_MASK;
2126 break;
2127
2128 default:
2129 ret = -ENOTSUPP;
2130 goto bail;
2131 }
2132 dd->ipath_ibcddrctrl &= ~(maskr << lsb);
2133 dd->ipath_ibcddrctrl |= (((u64) val & maskr) << lsb);
2134 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
2135 dd->ipath_ibcddrctrl);
2136 if (setforce)
2137 dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
2138bail:
2139 return ret;
2140}
2141
2142static void ipath_7220_read_counters(struct ipath_devdata *dd,
2143 struct infinipath_counters *cntrs)
2144{
2145 u64 *counters = (u64 *) cntrs;
2146 int i;
2147
2148 for (i = 0; i < sizeof(*cntrs) / sizeof(u64); i++)
2149 counters[i] = ipath_snap_cntr(dd, i);
2150}
2151
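
read_counters works only because infinipath_counters is laid out as nothing but consecutive u64s, so the struct can be walked as a flat array. A sketch of the same trick with a stand-in struct and a hypothetical snapshot function:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for infinipath_counters: every member must be a u64 with no
 * padding for the flat-array walk to be valid.
 */
struct counters {
	uint64_t tx_pkts;
	uint64_t rx_pkts;
	uint64_t rx_errs;
};

/* Hypothetical snapshot: the driver reads counter i from the chip. */
static uint64_t snap_cntr(int i)
{
	return 100 + i;
}

static void read_counters(struct counters *cntrs)
{
	uint64_t *counters = (uint64_t *)cntrs;
	unsigned i;

	for (i = 0; i < sizeof(*cntrs) / sizeof(uint64_t); i++)
		counters[i] = snap_cntr(i);
}

int main(void)
{
	struct counters c;

	read_counters(&c);
	printf("%llu %llu %llu\n", (unsigned long long)c.tx_pkts,
	       (unsigned long long)c.rx_pkts, (unsigned long long)c.rx_errs);
	return 0;
}
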
2152/* if we are using MSI, try to fall back to IntX */
2153static int ipath_7220_intr_fallback(struct ipath_devdata *dd)
2154{
2155 if (dd->ipath_msi_lo) {
2156 dev_info(&dd->pcidev->dev, "MSI interrupt not detected,"
2157 " trying IntX interrupts\n");
2158 ipath_7220_nomsi(dd);
2159 ipath_enable_intx(dd->pcidev);
2160 /*
2161 * some newer kernels require free_irq before disable_msi,
2162 * and irq can be changed during disable and intx enable
2163	 * and we therefore need to use the pcidev->irq value,
2164 * not our saved MSI value.
2165 */
2166 dd->ipath_irq = dd->pcidev->irq;
2167 if (request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
2168 IPATH_DRV_NAME, dd))
2169 ipath_dev_err(dd,
2170 "Could not re-request_irq for IntX\n");
2171 return 1;
2172 }
2173 return 0;
2174}
2175
2176/*
2177 * reset the XGXS (between serdes and IBC). Slightly less intrusive
2178 * than resetting the IBC or external link state, and useful in some
2179 * cases to cause some retraining. To do this right, we reset IBC
2180 * as well.
2181 */
2182static void ipath_7220_xgxs_reset(struct ipath_devdata *dd)
2183{
2184 u64 val, prev_val;
2185
2186 prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
2187 val = prev_val | INFINIPATH_XGXS_RESET;
2188 prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
2189 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
2190 dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
2191 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
2192 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
2193 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
2194 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
2195 dd->ipath_control);
2196}
2197
2198
2199/* Still needs cleanup, too much hardwired stuff */
2200static void autoneg_send(struct ipath_devdata *dd,
2201 u32 *hdr, u32 dcnt, u32 *data)
2202{
2203 int i;
2204 u64 cnt;
2205 u32 __iomem *piobuf;
2206 u32 pnum;
2207
2208 i = 0;
2209 cnt = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
2210 while (!(piobuf = ipath_getpiobuf(dd, cnt, &pnum))) {
2211 if (i++ > 15) {
2212 ipath_dbg("Couldn't get pio buffer for send\n");
2213 return;
2214 }
2215 udelay(2);
2216 }
2217 if (dd->ipath_flags&IPATH_HAS_PBC_CNT)
2218 cnt |= 0x80000000UL<<32; /* mark as VL15 */
2219 writeq(cnt, piobuf);
2220 ipath_flush_wc();
2221 __iowrite32_copy(piobuf + 2, hdr, 7);
2222 __iowrite32_copy(piobuf + 9, data, dcnt);
2223 ipath_flush_wc();
2224}
2225
2226/*
2227 * _start packet gets sent twice at start, _done gets sent twice at end
2228 */
2229static void ipath_autoneg_send(struct ipath_devdata *dd, int which)
2230{
2231 static u32 swapped;
2232 u32 dw, i, hcnt, dcnt, *data;
2233 static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
2234 static u32 madpayload_start[0x40] = {
2235 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
2236 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
2237 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
2238 };
2239 static u32 madpayload_done[0x40] = {
2240 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
2241 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
2242 0x40000001, 0x1388, 0x15e, /* rest 0's */
2243 };
2244 dcnt = sizeof(madpayload_start)/sizeof(madpayload_start[0]);
2245 hcnt = sizeof(hdr)/sizeof(hdr[0]);
2246 if (!swapped) {
2247 /* for maintainability, do it at runtime */
2248 for (i = 0; i < hcnt; i++) {
2249 dw = (__force u32) cpu_to_be32(hdr[i]);
2250 hdr[i] = dw;
2251 }
2252 for (i = 0; i < dcnt; i++) {
2253 dw = (__force u32) cpu_to_be32(madpayload_start[i]);
2254 madpayload_start[i] = dw;
2255 dw = (__force u32) cpu_to_be32(madpayload_done[i]);
2256 madpayload_done[i] = dw;
2257 }
2258 swapped = 1;
2259 }
2260
2261 data = which ? madpayload_done : madpayload_start;
2262 ipath_cdbg(PKT, "Sending %s special MADs\n", which?"done":"start");
2263
2264 autoneg_send(dd, hdr, dcnt, data);
2265 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
2266 udelay(2);
2267 autoneg_send(dd, hdr, dcnt, data);
2268 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
2269 udelay(2);
2270}
2271
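
The header and payload tables above are written in host order for readability and byte-swapped to big-endian exactly once, at first use, instead of being maintained as pre-swapped constants. A self-contained sketch of that swap-once pattern, using htonl as a stand-in for cpu_to_be32:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t payload[4] = { 0x1810103, 0x1, 0x2c90000, 0x2c9 };

static void send_payload(void)
{
	static int swapped;	/* one-time flag, for maintainability */
	unsigned i;

	if (!swapped) {
		for (i = 0; i < sizeof(payload) / sizeof(payload[0]); i++)
			payload[i] = htonl(payload[i]);
		swapped = 1;
	}
	/* payload[] is now in wire (big-endian) order; hand it to the sender */
	printf("first word on the wire: 0x%08x\n", payload[0]);
}

int main(void)
{
	send_payload();
	send_payload();		/* second call must not swap again */
	return 0;
}
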
2272
2273
2274/*
2275 * Do the absolute minimum to cause an IB speed change, and make it
2276 * ready, but don't actually trigger the change. The caller will
2277 * do that when ready (if link is in Polling training state, it will
2278 * happen immediately, otherwise when link next goes down)
2279 *
2280 * This routine should only be used as part of the DDR autonegotiation
2281 * code for devices that are not compliant with IB 1.2 (or code that
2282 * fixes things up for same).
2283 *
2284 * This is used when the link has gone down with autoneg enabled, or
2285 * autoneg has failed and we give up until the next time both speeds
2286 * are set; then we want IBTA enabled as well as "use max enabled speed".
2287 */
2288static void set_speed_fast(struct ipath_devdata *dd, u32 speed)
2289{
2290 dd->ipath_ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
2291 IBA7220_IBC_IBTA_1_2_MASK |
2292 (IBA7220_IBC_WIDTH_MASK << IBA7220_IBC_WIDTH_SHIFT));
2293
2294 if (speed == (IPATH_IB_SDR | IPATH_IB_DDR))
2295 dd->ipath_ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
2296 IBA7220_IBC_IBTA_1_2_MASK;
2297 else
2298 dd->ipath_ibcddrctrl |= speed == IPATH_IB_DDR ?
2299 IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
2300
2301 /*
2302 * Convert from IB-style 1 = 1x, 2 = 4x, 3 = auto
2303 * to chip-centric 0 = 1x, 1 = 4x, 2 = auto
2304 */
2305 dd->ipath_ibcddrctrl |= (u64)(dd->ipath_link_width_enabled - 1) <<
2306 IBA7220_IBC_WIDTH_SHIFT;
2307 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
2308 dd->ipath_ibcddrctrl);
2309 ipath_cdbg(VERBOSE, "setup for IB speed (%x) done\n", speed);
2310}
2311
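
The width conversion in set_speed_fast is the IB-to-chip mapping noted in its comment (1 = 1x, 2 = 4x, 3 = auto becomes 0/1/2 by subtracting one), and the speed bits follow a both-enabled-means-autonegotiate rule. A sketch with illustrative bit assignments (the real masks and shifts are chip-defined):

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit assignments only. */
#define SPEED_SDR	0x1ull
#define SPEED_DDR	0x2ull
#define SPEED_AUTONEG	0x4ull
#define IBTA_1_2	0x8ull

#define IB_SDR		1u
#define IB_DDR		2u

/* Both speeds enabled means "autonegotiate per IBTA 1.2";
 * otherwise force the single requested speed.
 */
static uint64_t speed_bits(unsigned speed)
{
	if (speed == (IB_SDR | IB_DDR))
		return SPEED_AUTONEG | IBTA_1_2;
	return speed == IB_DDR ? SPEED_DDR : SPEED_SDR;
}

/* IB-style width: 1 = 1x, 2 = 4x, 3 = auto; the chip field is one less. */
static unsigned ib_width_to_chip(unsigned ib_width)
{
	return ib_width - 1;
}

int main(void)
{
	printf("SDR only: %#llx\n", (unsigned long long)speed_bits(IB_SDR));
	printf("SDR|DDR:  %#llx\n",
	       (unsigned long long)speed_bits(IB_SDR | IB_DDR));
	printf("width auto -> chip %u\n", ib_width_to_chip(3));
	return 0;
}
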
2312
2313/*
2314 * this routine is only used when we are not talking to another
2315 * IB 1.2-compliant device that we think can do DDR.
2316 * (This includes all existing switch chips as of Oct 2007.)
2317 * 1.2-compliant devices go directly to DDR prior to reaching INIT
2318 */
2319static void try_auto_neg(struct ipath_devdata *dd)
2320{
2321 /*
2322 * required for older non-IB1.2 DDR switches. Newer
2323 * non-IB-compliant switches don't need it, but so far,
2324 * aren't bothered by it either. "Magic constant"
2325 */
2326 ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl),
2327 0x3b9dc07);
2328 dd->ipath_flags |= IPATH_IB_AUTONEG_INPROG;
2329 ipath_autoneg_send(dd, 0);
2330 set_speed_fast(dd, IPATH_IB_DDR);
2331 ipath_toggle_rclkrls(dd);
2332 /* 2 msec is minimum length of a poll cycle */
2333 schedule_delayed_work(&dd->ipath_autoneg_work,
2334 msecs_to_jiffies(2));
2335}
2336
2337
2338static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
2339{
2340 int ret = 0;
2341 u32 ltstate = ipath_ib_linkstate(dd, ibcs);
2342
2343 dd->ipath_link_width_active =
2344 ((ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1) ?
2345 IB_WIDTH_4X : IB_WIDTH_1X;
2346 dd->ipath_link_speed_active =
2347 ((ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1) ?
2348 IPATH_IB_DDR : IPATH_IB_SDR;
2349
2350 if (!ibup) {
2351 /*
2352 * when link goes down we don't want aeq running, so it
2353		 * won't interfere with IBC training, etc., and we need
2354 * to go back to the static SerDes preset values
2355 */
2356 if (dd->ipath_x1_fix_tries &&
2357 ltstate <= INFINIPATH_IBCS_LT_STATE_SLEEPQUIET &&
2358 ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP)
2359 dd->ipath_x1_fix_tries = 0;
2360 if (!(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED |
2361 IPATH_IB_AUTONEG_INPROG)))
2362 set_speed_fast(dd, dd->ipath_link_speed_enabled);
2363 if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) {
2364 ipath_cdbg(VERBOSE, "Setting RXEQ defaults\n");
2365 ipath_sd7220_presets(dd);
2366 }
2367		/* this might be better in ipath_sd7220_presets() */
2368 ipath_set_relock_poll(dd, ibup);
2369 } else {
2370 if (ipath_compat_ddr_negotiate &&
2371 !(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED |
2372 IPATH_IB_AUTONEG_INPROG)) &&
2373 dd->ipath_link_speed_active == IPATH_IB_SDR &&
2374 (dd->ipath_link_speed_enabled &
2375 (IPATH_IB_DDR | IPATH_IB_SDR)) ==
2376 (IPATH_IB_DDR | IPATH_IB_SDR) &&
2377 dd->ipath_autoneg_tries < IPATH_AUTONEG_TRIES) {
2378 /* we are SDR, and DDR auto-negotiation enabled */
2379 ++dd->ipath_autoneg_tries;
2380 ipath_dbg("DDR negotiation try, %u/%u\n",
2381 dd->ipath_autoneg_tries,
2382 IPATH_AUTONEG_TRIES);
2383 try_auto_neg(dd);
2384 ret = 1; /* no other IB status change processing */
2385 } else if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
2386 && dd->ipath_link_speed_active == IPATH_IB_SDR) {
2387 ipath_autoneg_send(dd, 1);
2388 set_speed_fast(dd, IPATH_IB_DDR);
2389 udelay(2);
2390 ipath_toggle_rclkrls(dd);
2391 ret = 1; /* no other IB status change processing */
2392 } else {
2393 if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
2394 (dd->ipath_link_speed_active & IPATH_IB_DDR)) {
2395 ipath_dbg("Got to INIT with DDR autoneg\n");
2396 dd->ipath_flags &= ~(IPATH_IB_AUTONEG_INPROG
2397 | IPATH_IB_AUTONEG_FAILED);
2398 dd->ipath_autoneg_tries = 0;
2399 /* re-enable SDR, for next link down */
2400 set_speed_fast(dd,
2401 dd->ipath_link_speed_enabled);
2402 wake_up(&dd->ipath_autoneg_wait);
2403 } else if (dd->ipath_flags & IPATH_IB_AUTONEG_FAILED) {
2404 /*
2405 * clear autoneg failure flag, and do setup
2406 * so we'll try next time link goes down and
2407 * back to INIT (possibly connected to different
2408 * device).
2409 */
2410 ipath_dbg("INIT %sDR after autoneg failure\n",
2411 (dd->ipath_link_speed_active &
2412 IPATH_IB_DDR) ? "D" : "S");
2413 dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED;
2414 dd->ipath_ibcddrctrl |=
2415 IBA7220_IBC_IBTA_1_2_MASK;
2416 ipath_write_kreg(dd,
2417 IPATH_KREG_OFFSET(IBNCModeCtrl), 0);
2418 }
2419 }
2420 /*
2421 * if we are in 1X, and are in autoneg width, it
2422 * could be due to an xgxs problem, so if we haven't
2423 * already tried, try twice to get to 4X; if we
2424 * tried, and couldn't, report it, since it will
2425 * probably not be what is desired.
2426 */
2427 if ((dd->ipath_link_width_enabled & (IB_WIDTH_1X |
2428 IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X)
2429 && dd->ipath_link_width_active == IB_WIDTH_1X
2430 && dd->ipath_x1_fix_tries < 3) {
2431 if (++dd->ipath_x1_fix_tries == 3)
2432 dev_info(&dd->pcidev->dev,
2433 "IB link is in 1X mode\n");
2434 else {
2435 ipath_cdbg(VERBOSE, "IB 1X in "
2436 "auto-width, try %u to be "
2437 "sure it's really 1X; "
2438 "ltstate %u\n",
2439 dd->ipath_x1_fix_tries,
2440 ltstate);
2441 dd->ipath_f_xgxs_reset(dd);
2442 ret = 1; /* skip other processing */
2443 }
2444 }
2445
2446 if (!ret) {
2447 dd->delay_mult = rate_to_delay
2448 [(ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1]
2449 [(ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1];
2450
2451 ipath_set_relock_poll(dd, ibup);
2452 }
2453 }
2454
2455 if (!ret)
2456 ipath_setup_7220_setextled(dd, ipath_ib_linkstate(dd, ibcs),
2457 ltstate);
2458 return ret;
2459}
2460
2461
2462/*
2463 * Handle the empirically determined mechanism for auto-negotiation
2464 * of DDR speed with switches.
2465 */
2466static void autoneg_work(struct work_struct *work)
2467{
2468 struct ipath_devdata *dd;
2469 u64 startms;
2470 u32 lastlts, i;
2471
2472 dd = container_of(work, struct ipath_devdata,
2473 ipath_autoneg_work.work);
2474
2475 startms = jiffies_to_msecs(jiffies);
2476
2477 /*
2478	 * busy wait for this first part; it should be at most a
2479 * few hundred usec, since we scheduled ourselves for 2msec.
2480 */
2481 for (i = 0; i < 25; i++) {
2482 lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
2483 if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
2484 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN_DISABLE);
2485 break;
2486 }
2487 udelay(100);
2488 }
2489
2490 if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG))
2491 goto done; /* we got there early or told to stop */
2492
2493 /* we expect this to timeout */
2494 if (wait_event_timeout(dd->ipath_autoneg_wait,
2495 !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
2496 msecs_to_jiffies(90)))
2497 goto done;
2498
2499 ipath_toggle_rclkrls(dd);
2500
2501 /* we expect this to timeout */
2502 if (wait_event_timeout(dd->ipath_autoneg_wait,
2503 !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
2504 msecs_to_jiffies(1700)))
2505 goto done;
2506
2507 set_speed_fast(dd, IPATH_IB_SDR);
2508 ipath_toggle_rclkrls(dd);
2509
2510 /*
2511 * wait up to 250 msec for link to train and get to INIT at DDR;
2512 * this should terminate early
2513 */
2514 wait_event_timeout(dd->ipath_autoneg_wait,
2515 !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
2516 msecs_to_jiffies(250));
2517done:
2518 if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
2519 ipath_dbg("Did not get to DDR INIT (%x) after %Lu msecs\n",
2520 ipath_ib_state(dd, dd->ipath_lastibcstat),
2521 jiffies_to_msecs(jiffies)-startms);
2522 dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
2523 if (dd->ipath_autoneg_tries == IPATH_AUTONEG_TRIES) {
2524 dd->ipath_flags |= IPATH_IB_AUTONEG_FAILED;
2525 ipath_dbg("Giving up on DDR until next IB "
2526 "link Down\n");
2527 dd->ipath_autoneg_tries = 0;
2528 }
2529 set_speed_fast(dd, dd->ipath_link_speed_enabled);
2530 }
2531}
2532
2533
2534/**
2535 * ipath_init_iba7220_funcs - set up the chip-specific function pointers
2536 * @dd: the infinipath device
2537 *
2538 * This is global, and is called directly at init to set up the
2539 * chip-specific function pointers for later use.
2540 */
2541void ipath_init_iba7220_funcs(struct ipath_devdata *dd)
2542{
2543 dd->ipath_f_intrsetup = ipath_7220_intconfig;
2544 dd->ipath_f_bus = ipath_setup_7220_config;
2545 dd->ipath_f_reset = ipath_setup_7220_reset;
2546 dd->ipath_f_get_boardname = ipath_7220_boardname;
2547 dd->ipath_f_init_hwerrors = ipath_7220_init_hwerrors;
2548 dd->ipath_f_early_init = ipath_7220_early_init;
2549 dd->ipath_f_handle_hwerrors = ipath_7220_handle_hwerrors;
2550 dd->ipath_f_quiet_serdes = ipath_7220_quiet_serdes;
2551 dd->ipath_f_bringup_serdes = ipath_7220_bringup_serdes;
2552 dd->ipath_f_clear_tids = ipath_7220_clear_tids;
2553 dd->ipath_f_put_tid = ipath_7220_put_tid;
2554 dd->ipath_f_cleanup = ipath_setup_7220_cleanup;
2555 dd->ipath_f_setextled = ipath_setup_7220_setextled;
2556 dd->ipath_f_get_base_info = ipath_7220_get_base_info;
2557 dd->ipath_f_free_irq = ipath_7220_free_irq;
2558 dd->ipath_f_tidtemplate = ipath_7220_tidtemplate;
2559 dd->ipath_f_intr_fallback = ipath_7220_intr_fallback;
2560 dd->ipath_f_xgxs_reset = ipath_7220_xgxs_reset;
2561 dd->ipath_f_get_ib_cfg = ipath_7220_get_ib_cfg;
2562 dd->ipath_f_set_ib_cfg = ipath_7220_set_ib_cfg;
2563 dd->ipath_f_config_jint = ipath_7220_config_jint;
2564 dd->ipath_f_config_ports = ipath_7220_config_ports;
2565 dd->ipath_f_read_counters = ipath_7220_read_counters;
2566 dd->ipath_f_get_msgheader = ipath_7220_get_msgheader;
2567 dd->ipath_f_ib_updown = ipath_7220_ib_updown;
2568
2569 /* initialize chip-specific variables */
2570 ipath_init_7220_variables(dd);
2571}
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 4471674975cd..27dd89476660 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -155,24 +155,13 @@ static int bringup_link(struct ipath_devdata *dd)
 			 dd->ipath_control);
 
 	/*
-	 * Note that prior to try 14 or 15 of IB, the credit scaling
-	 * wasn't working, because it was swapped for writes with the
-	 * 1 bit default linkstate field
+	 * set initial max size pkt IBC will send, including ICRC; it's the
+	 * PIO buffer size in dwords, less 1; also see ipath_set_mtu()
 	 */
+	val = (dd->ipath_ibmaxlen >> 2) + 1;
+	ibc = val << dd->ibcc_mpl_shift;
 
-	/* ignore pbc and align word */
-	val = dd->ipath_piosize2k - 2 * sizeof(u32);
-	/*
-	 * for ICRC, which we only send in diag test pkt mode, and we
-	 * don't need to worry about that for mtu
-	 */
-	val += 1;
-	/*
-	 * Set the IBC maxpktlength to the size of our pio buffers the
-	 * maxpktlength is in words. This is *not* the IB data MTU.
-	 */
-	ibc = (val / sizeof(u32)) << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
-	/* in KB */
+	/* flowcontrolwatermark is in units of KBytes */
 	ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT;
 	/*
 	 * How often flowctrl sent. More or less in usecs; balance against
@@ -191,10 +180,13 @@ static int bringup_link(struct ipath_devdata *dd)
 	/*
 	 * Want to start out with both LINKCMD and LINKINITCMD in NOP
 	 * (0 and 0). Don't put linkinitcmd in ipath_ibcctrl, want that
-	 * to stay a NOP
+	 * to stay a NOP. Flag that we are disabled, for the (unlikely)
+	 * case that some recovery path is trying to bring the link up
+	 * before we are ready.
 	 */
 	ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
 		INFINIPATH_IBCC_LINKINITCMD_SHIFT;
+	dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
 	ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n",
 		   (unsigned long long) ibc);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc);
@@ -227,17 +219,26 @@ static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd)
 		pd->port_cnt = 1;
 		/* The port 0 pkey table is used by the layer interface. */
 		pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
+		pd->port_seq_cnt = 1;
 	}
 	return pd;
 }
 
-static int init_chip_first(struct ipath_devdata *dd,
-			   struct ipath_portdata **pdp)
+static int init_chip_first(struct ipath_devdata *dd)
 {
-	struct ipath_portdata *pd = NULL;
+	struct ipath_portdata *pd;
 	int ret = 0;
 	u64 val;
 
+	spin_lock_init(&dd->ipath_kernel_tid_lock);
+	spin_lock_init(&dd->ipath_user_tid_lock);
+	spin_lock_init(&dd->ipath_sendctrl_lock);
+	spin_lock_init(&dd->ipath_sdma_lock);
+	spin_lock_init(&dd->ipath_gpio_lock);
+	spin_lock_init(&dd->ipath_eep_st_lock);
+	spin_lock_init(&dd->ipath_sdepb_lock);
+	mutex_init(&dd->ipath_eep_lock);
+
 	/*
 	 * skip cfgports stuff because we are not allocating memory,
 	 * and we don't want problems if the portcnt changed due to
@@ -250,12 +251,14 @@ static int init_chip_first(struct ipath_devdata *dd,
 	else if (ipath_cfgports <= dd->ipath_portcnt) {
 		dd->ipath_cfgports = ipath_cfgports;
 		ipath_dbg("Configured to use %u ports out of %u in chip\n",
-			  dd->ipath_cfgports, dd->ipath_portcnt);
+			  dd->ipath_cfgports, ipath_read_kreg32(dd,
+			  dd->ipath_kregs->kr_portcnt));
 	} else {
 		dd->ipath_cfgports = dd->ipath_portcnt;
 		ipath_dbg("Tried to configured to use %u ports; chip "
 			  "only supports %u\n", ipath_cfgports,
-			  dd->ipath_portcnt);
+			  ipath_read_kreg32(dd,
+			  dd->ipath_kregs->kr_portcnt));
 	}
 	/*
 	 * Allocate full portcnt array, rather than just cfgports, because
@@ -295,12 +298,9 @@ static int init_chip_first(struct ipath_devdata *dd,
 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize);
 	dd->ipath_piosize2k = val & ~0U;
 	dd->ipath_piosize4k = val >> 32;
-	/*
-	 * Note: the chips support a maximum MTU of 4096, but the driver
-	 * hasn't implemented this feature yet, so set the initial value
-	 * to 2048.
-	 */
-	dd->ipath_ibmtu = 2048;
+	if (dd->ipath_piosize4k == 0 && ipath_mtu4096)
+		ipath_mtu4096 = 0; /* 4KB not supported by this chip */
+	dd->ipath_ibmtu = ipath_mtu4096 ? 4096 : 2048;
 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt);
 	dd->ipath_piobcnt2k = val & ~0U;
 	dd->ipath_piobcnt4k = val >> 32;
@@ -328,43 +328,46 @@ static int init_chip_first(struct ipath_devdata *dd,
 	else ipath_dbg("%u 2k piobufs @ %p\n",
 		       dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
 
-	spin_lock_init(&dd->ipath_tid_lock);
-	spin_lock_init(&dd->ipath_sendctrl_lock);
-	spin_lock_init(&dd->ipath_gpio_lock);
-	spin_lock_init(&dd->ipath_eep_st_lock);
-	mutex_init(&dd->ipath_eep_lock);
-
 done:
-	*pdp = pd;
 	return ret;
 }
 
 /**
  * init_chip_reset - re-initialize after a reset, or enable
  * @dd: the infinipath device
- * @pdp: output for port data
  *
  * sanity check at least some of the values after reset, and
  * ensure no receive or transmit (explictly, in case reset
  * failed
  */
-static int init_chip_reset(struct ipath_devdata *dd,
-			   struct ipath_portdata **pdp)
+static int init_chip_reset(struct ipath_devdata *dd)
 {
 	u32 rtmp;
+	int i;
+	unsigned long flags;
+
+	/*
+	 * ensure chip does no sends or receives, tail updates, or
+	 * pioavail updates while we re-initialize
+	 */
+	dd->ipath_rcvctrl &= ~(1ULL << dd->ipath_r_tailupd_shift);
+	for (i = 0; i < dd->ipath_portcnt; i++) {
+		clear_bit(dd->ipath_r_portenable_shift + i,
+			  &dd->ipath_rcvctrl);
+		clear_bit(dd->ipath_r_intravail_shift + i,
+			  &dd->ipath_rcvctrl);
+	}
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+			 dd->ipath_rcvctrl);
 
-	*pdp = dd->ipath_pd[0];
-	/* ensure chip does no sends or receives while we re-initialize */
-	dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl);
+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+	dd->ipath_sendctrl = 0U; /* no sdma, etc */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control);
+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
 
-	rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
-	if (dd->ipath_portcnt != rtmp)
-		dev_info(&dd->pcidev->dev, "portcnt was %u before "
-			 "reset, now %u, using original\n",
-			 dd->ipath_portcnt, rtmp);
 	rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
 	if (rtmp != dd->ipath_rcvtidcnt)
 		dev_info(&dd->pcidev->dev, "tidcnt was %u before "
@@ -467,10 +470,10 @@ static void init_shadow_tids(struct ipath_devdata *dd)
 	dd->ipath_physshadow = addrs;
 }
 
-static void enable_chip(struct ipath_devdata *dd,
-			struct ipath_portdata *pd, int reinit)
+static void enable_chip(struct ipath_devdata *dd, int reinit)
 {
 	u32 val;
+	u64 rcvmask;
 	unsigned long flags;
 	int i;
 
@@ -484,17 +487,28 @@ static void enable_chip(struct ipath_devdata *dd,
 	/* Enable PIO send, and update of PIOavail regs to memory. */
 	dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
 		INFINIPATH_S_PIOBUFAVAILUPD;
+
+	/*
+	 * Set the PIO avail update threshold to host memory
+	 * on chips that support it.
+	 */
+	if (dd->ipath_pioupd_thresh)
+		dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
+			<< INFINIPATH_S_UPDTHRESH_SHIFT;
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
 	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
 	/*
-	 * enable port 0 receive, and receive interrupt. other ports
-	 * done as user opens and inits them.
+	 * Enable kernel ports' receive and receive interrupt.
+	 * Other ports done as user opens and inits them.
 	 */
-	dd->ipath_rcvctrl = (1ULL << dd->ipath_r_tailupd_shift) |
-		(1ULL << dd->ipath_r_portenable_shift) |
-		(1ULL << dd->ipath_r_intravail_shift);
+	rcvmask = 1ULL;
+	dd->ipath_rcvctrl |= (rcvmask << dd->ipath_r_portenable_shift) |
+		(rcvmask << dd->ipath_r_intravail_shift);
+	if (!(dd->ipath_flags & IPATH_NODMA_RTAIL))
+		dd->ipath_rcvctrl |= (1ULL << dd->ipath_r_tailupd_shift);
+
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
 			 dd->ipath_rcvctrl);
 
@@ -505,16 +519,16 @@ static void enable_chip(struct ipath_devdata *dd,
 	dd->ipath_flags |= IPATH_INITTED;
 
 	/*
-	 * init our shadow copies of head from tail values, and write
-	 * head values to match.
+	 * Init our shadow copies of head from tail values,
+	 * and write head values to match.
 	 */
 	val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
-	(void)ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
+	ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
 
 	/* Initialize so we interrupt on next packet received */
-	(void)ipath_write_ureg(dd, ur_rcvhdrhead,
+	ipath_write_ureg(dd, ur_rcvhdrhead,
 			dd->ipath_rhdrhead_intr_off |
 			dd->ipath_pd[0]->port_head, 0);
 
 	/*
 	 * by now pioavail updates to memory should have occurred, so
@@ -523,25 +537,26 @@ static void enable_chip(struct ipath_devdata *dd,
 	 * initial values of the generation bit correct.
 	 */
 	for (i = 0; i < dd->ipath_pioavregs; i++) {
-		__le64 val;
+		__le64 pioavail;
 
 		/*
 		 * Chip Errata bug 6641; even and odd qwords>3 are swapped.
 		 */
 		if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
-			val = dd->ipath_pioavailregs_dma[i ^ 1];
+			pioavail = dd->ipath_pioavailregs_dma[i ^ 1];
 		else
-			val = dd->ipath_pioavailregs_dma[i];
-		dd->ipath_pioavailshadow[i] = le64_to_cpu(val);
+			pioavail = dd->ipath_pioavailregs_dma[i];
+		dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail) |
+			(~dd->ipath_pioavailkernel[i] <<
+			 INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT);
 	}
 	/* can get counters, stats, etc. */
 	dd->ipath_flags |= IPATH_PRESENT;
 }
 
-static int init_housekeeping(struct ipath_devdata *dd,
-			     struct ipath_portdata **pdp, int reinit)
+static int init_housekeeping(struct ipath_devdata *dd, int reinit)
 {
-	char boardn[32];
+	char boardn[40];
 	int ret = 0;
 
 	/*
@@ -600,18 +615,9 @@ static int init_housekeeping(struct ipath_devdata *dd,
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
 			 INFINIPATH_E_RESET);
 
-	if (reinit)
-		ret = init_chip_reset(dd, pdp);
-	else
-		ret = init_chip_first(dd, pdp);
-
-	if (ret)
-		goto done;
-
-	ipath_cdbg(VERBOSE, "Revision %llx (PCI %x), %u ports, %u tids, "
-		   "%u egrtids\n", (unsigned long long) dd->ipath_revision,
-		   dd->ipath_pcirev, dd->ipath_portcnt, dd->ipath_rcvtidcnt,
-		   dd->ipath_rcvegrcnt);
+	ipath_cdbg(VERBOSE, "Revision %llx (PCI %x)\n",
+		   (unsigned long long) dd->ipath_revision,
+		   dd->ipath_pcirev);
 
 	if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) &
 	     INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) {
@@ -650,10 +656,39 @@ static int init_housekeeping(struct ipath_devdata *dd,
 
 	ipath_dbg("%s", dd->ipath_boardversion);
 
+	if (ret)
+		goto done;
+
+	if (reinit)
+		ret = init_chip_reset(dd);
+	else
+		ret = init_chip_first(dd);
+
 done:
 	return ret;
 }
 
+static void verify_interrupt(unsigned long opaque)
+{
+	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
+
+	if (!dd)
+		return; /* being torn down */
+
+	/*
+	 * If we don't have any interrupts, let the user know and
+	 * don't bother checking again.
+	 */
+	if (dd->ipath_int_counter == 0) {
+		if (!dd->ipath_f_intr_fallback(dd))
+			dev_err(&dd->pcidev->dev, "No interrupts detected, "
+				"not usable.\n");
+		else /* re-arm the timer to see if fallback works */
+			mod_timer(&dd->ipath_intrchk_timer, jiffies + HZ/2);
+	} else
+		ipath_cdbg(VERBOSE, "%u interrupts at timer check\n",
+			   dd->ipath_int_counter);
+}
 
 /**
  * ipath_init_chip - do the actual initialization sequence on the chip
@@ -676,11 +711,11 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 	u32 val32, kpiobufs;
 	u32 piobufs, uports;
 	u64 val;
-	struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
+	struct ipath_portdata *pd;
 	gfp_t gfp_flags = GFP_USER | __GFP_COMP;
 	unsigned long flags;
 
-	ret = init_housekeeping(dd, &pd, reinit);
+	ret = init_housekeeping(dd, reinit);
 	if (ret)
 		goto done;
 
@@ -700,7 +735,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 	 * we now use routines that backend onto __get_free_pages, the
 	 * rest would be wasted.
 	 */
-	dd->ipath_rcvhdrcnt = dd->ipath_rcvegrcnt;
+	dd->ipath_rcvhdrcnt = max(dd->ipath_p0_rcvegrcnt, dd->ipath_rcvegrcnt);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt,
 			 dd->ipath_rcvhdrcnt);
 
@@ -731,8 +766,8 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 	if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) {
 		int i = (int) piobufs -
 			(int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
-		if (i < 0)
-			i = 0;
+		if (i < 1)
+			i = 1;
 		dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "
 			 "%d for kernel leaves too few for %d user ports "
 			 "(%d each); using %u\n", kpiobufs,
@@ -751,24 +786,40 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 		ipath_dbg("allocating %u pbufs/port leaves %u unused, "
 			  "add to kernel\n", dd->ipath_pbufsport, val32);
 		dd->ipath_lastport_piobuf -= val32;
+		kpiobufs += val32;
 		ipath_dbg("%u pbufs/port leaves %u unused, add to kernel\n",
 			  dd->ipath_pbufsport, val32);
 	}
-	dd->ipath_lastpioindex = dd->ipath_lastport_piobuf;
+	dd->ipath_lastpioindex = 0;
+	dd->ipath_lastpioindexl = dd->ipath_piobcnt2k;
+	ipath_chg_pioavailkernel(dd, 0, piobufs, 1);
 	ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
 		   "each for %u user ports\n", kpiobufs,
 		   piobufs, dd->ipath_pbufsport, uports);
+	if (dd->ipath_pioupd_thresh) {
+		if (dd->ipath_pbufsport < dd->ipath_pioupd_thresh)
+			dd->ipath_pioupd_thresh = dd->ipath_pbufsport;
+		if (kpiobufs < dd->ipath_pioupd_thresh)
+			dd->ipath_pioupd_thresh = kpiobufs;
+	}
+
+	ret = dd->ipath_f_early_init(dd);
+	if (ret) {
+		ipath_dev_err(dd, "Early initialization failure\n");
+		goto done;
+	}
 
-	dd->ipath_f_early_init(dd);
 	/*
-	 * cancel any possible active sends from early driver load.
+	 * Cancel any possible active sends from early driver load.
 	 * Follows early_init because some chips have to initialize
 	 * PIO buffers in early_init to avoid false parity errors.
 	 */
 	ipath_cancel_sends(dd, 0);
 
-	/* early_init sets rcvhdrentsize and rcvhdrsize, so this must be
-	 * done after early_init */
+	/*
+	 * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be
+	 * done after early_init.
+	 */
 	dd->ipath_hdrqlast =
 		dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize,
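Editor's note: the arithmetic in the hunk above splits the PIO send buffers between the kernel (kpiobufs) and the user ports, clamps the kernel's share so each user port keeps at least IPATH_MIN_USER_PORT_BUFCNT buffers, and folds any division remainder back into the kernel pool (the `kpiobufs += val32` added above). A runnable sketch of that split; the numbers and a minimum of 8 per port are invented for illustration:

	#include <stdio.h>

	#define MIN_USER_PORT_BUFCNT 8	/* assumed value, for illustration */

	int main(void)
	{
		unsigned piobufs = 128, uports = 3, kpiobufs = 40;

		/* clamp so every user port keeps its minimum allotment */
		int avail = (int) piobufs - (int) (uports * MIN_USER_PORT_BUFCNT);
		if (avail < 1)
			avail = 1;
		if ((int) kpiobufs > avail)
			kpiobufs = avail;

		unsigned pbufsport = uports ? (piobufs - kpiobufs) / uports : 0;
		unsigned unused = piobufs - (kpiobufs + pbufsport * uports);

		kpiobufs += unused;	/* remainder goes back to the kernel */
		printf("kernel %u bufs, %u per user port\n", kpiobufs, pbufsport);
		return 0;
	}

The driver's real computation is more involved (it also derives the new ipath_pioupd_thresh), but the clamp-divide-fold-remainder shape is the same.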
@@ -783,8 +834,8 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 		goto done;
 	}
 
-	(void)ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
-			       dd->ipath_pioavailregs_phys);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
+			 dd->ipath_pioavailregs_phys);
 	/*
 	 * this is to detect s/w errors, which the h/w works around by
 	 * ignoring the low 6 bits of address, if it wasn't aligned.
@@ -843,58 +894,65 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 	/* enable errors that are masked, at least this first time. */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
 			 ~dd->ipath_maskederrs);
-	dd->ipath_errormask = ipath_read_kreg64(dd,
-		dd->ipath_kregs->kr_errormask);
+	dd->ipath_maskederrs = 0; /* don't re-enable ignored in timer */
+	dd->ipath_errormask =
+		ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
 	/* clear any interrupts up to this point (ints still not enabled) */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
 
+	dd->ipath_f_tidtemplate(dd);
+
 	/*
 	 * Set up the port 0 (kernel) rcvhdr q and egr TIDs.  If doing
 	 * re-init, the simplest way to handle this is to free
 	 * existing, and re-allocate.
 	 * Need to re-create rest of port 0 portdata as well.
 	 */
+	pd = dd->ipath_pd[0];
 	if (reinit) {
-		/* Alloc and init new ipath_portdata for port0,
+		struct ipath_portdata *npd;
+
+		/*
+		 * Alloc and init new ipath_portdata for port0,
 		 * Then free old pd. Could lead to fragmentation, but also
 		 * makes later support for hot-swap easier.
 		 */
-		struct ipath_portdata *npd;
 		npd = create_portdata0(dd);
 		if (npd) {
 			ipath_free_pddata(dd, pd);
-			dd->ipath_pd[0] = pd = npd;
+			dd->ipath_pd[0] = npd;
+			pd = npd;
 		} else {
-			ipath_dev_err(dd, "Unable to allocate portdata for"
-				      " port 0, failing\n");
+			ipath_dev_err(dd, "Unable to allocate portdata"
+				      " for port 0, failing\n");
 			ret = -ENOMEM;
 			goto done;
 		}
 	}
-	dd->ipath_f_tidtemplate(dd);
 	ret = ipath_create_rcvhdrq(dd, pd);
-	if (!ret) {
-		dd->ipath_hdrqtailptr =
-			(volatile __le64 *)pd->port_rcvhdrtail_kvaddr;
+	if (!ret)
 		ret = create_port0_egr(dd);
-	}
-	if (ret)
-		ipath_dev_err(dd, "failed to allocate port 0 (kernel) "
+	if (ret) {
+		ipath_dev_err(dd, "failed to allocate kernel port's "
 			      "rcvhdrq and/or egr bufs\n");
+		goto done;
+	}
 	else
-		enable_chip(dd, pd, reinit);
-
+		enable_chip(dd, reinit);
 
-	if (!ret && !reinit) {
-		/* used when we close a port, for DMA already in flight at close */
+	if (!reinit) {
+		/*
+		 * Used when we close a port, for DMA already in flight
+		 * at close.
+		 */
 		dd->ipath_dummy_hdrq = dma_alloc_coherent(
-			&dd->pcidev->dev, pd->port_rcvhdrq_size,
+			&dd->pcidev->dev, dd->ipath_pd[0]->port_rcvhdrq_size,
 			&dd->ipath_dummy_hdrq_phys,
 			gfp_flags);
-		if (!dd->ipath_dummy_hdrq ) {
+		if (!dd->ipath_dummy_hdrq) {
 			dev_info(&dd->pcidev->dev,
 				"Couldn't allocate 0x%lx bytes for dummy hdrq\n",
-				pd->port_rcvhdrq_size);
+				dd->ipath_pd[0]->port_rcvhdrq_size);
 			/* fallback to just 0'ing */
 			dd->ipath_dummy_hdrq_phys = 0UL;
 		}
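Editor's note: the reinit branch above is a standard allocate-then-swap pattern: the replacement port-0 portdata is built first, and the old one is freed and the pointer republished only if the allocation succeeded, so a failed re-init leaves the previous state intact. A minimal userspace sketch of the same idea (names invented):

	#include <stdlib.h>

	struct portdata { int dummy; };

	/* replace *slot with fresh state; keep the old state on failure */
	static int swap_port0(struct portdata **slot)
	{
		struct portdata *npd = calloc(1, sizeof(*npd));

		if (!npd)
			return -1;	/* old *slot still valid */
		free(*slot);
		*slot = npd;		/* publish only on success */
		return 0;
	}

	int main(void)
	{
		struct portdata *pd = calloc(1, sizeof(*pd));
		int ret = pd ? swap_port0(&pd) : -1;

		free(pd);
		return ret ? 1 : 0;
	}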
@@ -906,7 +964,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 	 */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
 
-	if(!dd->ipath_stats_timer_active) {
+	if (!dd->ipath_stats_timer_active) {
 		/*
 		 * first init, or after an admin disable/enable
 		 * set up stats retrieval timer, even if we had errors
@@ -922,6 +980,16 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 		dd->ipath_stats_timer_active = 1;
 	}
 
+	/* Set up SendDMA if chip supports it */
+	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+		ret = setup_sdma(dd);
+
+	/* Set up HoL state */
+	init_timer(&dd->ipath_hol_timer);
+	dd->ipath_hol_timer.function = ipath_hol_event;
+	dd->ipath_hol_timer.data = (unsigned long)dd;
+	dd->ipath_hol_state = IPATH_HOL_UP;
+
 done:
 	if (!ret) {
 		*dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;
@@ -934,6 +1002,20 @@ done:
 			 0ULL);
 		/* chip is usable; mark it as initialized */
 		*dd->ipath_statusp |= IPATH_STATUS_INITTED;
+
+		/*
+		 * setup to verify we get an interrupt, and fallback
+		 * to an alternate if necessary and possible
+		 */
+		if (!reinit) {
+			init_timer(&dd->ipath_intrchk_timer);
+			dd->ipath_intrchk_timer.function =
+				verify_interrupt;
+			dd->ipath_intrchk_timer.data =
+				(unsigned long) dd;
+		}
+		dd->ipath_intrchk_timer.expires = jiffies + HZ/2;
+		add_timer(&dd->ipath_intrchk_timer);
 	} else
 		ipath_dev_err(dd, "No interrupts enabled, couldn't "
 			      "setup interrupt address\n");
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 92e58c921522..1b58f4737c71 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -32,6 +32,7 @@
  */
 
 #include <linux/pci.h>
+#include <linux/delay.h>
 
 #include "ipath_kernel.h"
 #include "ipath_verbs.h"
@@ -59,9 +60,11 @@ static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum)
 	dev_info(&dd->pcidev->dev,
 		"Rewrite PIO buffer %u, to recover from parity error\n",
 		pnum);
-	*pbuf = dwcnt+1; /* no flush required, since already in freeze */
-	while(--dwcnt)
-		*pbuf++ = 0;
+
+	/* no flush required, since already in freeze */
+	writel(dwcnt + 1, pbuf);
+	while (--dwcnt)
+		writel(0, pbuf++);
 }
 
@@ -70,7 +73,7 @@ static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum)
 * If rewrite is true, and bits are set in the sendbufferror registers,
 * we'll write to the buffer, for error recovery on parity errors.
 */
-static void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
+void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
 {
 	u32 piobcnt;
 	unsigned long sbuf[4];
@@ -84,12 +87,14 @@ static void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
 		dd, dd->ipath_kregs->kr_sendbuffererror);
 	sbuf[1] = ipath_read_kreg64(
 		dd, dd->ipath_kregs->kr_sendbuffererror + 1);
-	if (piobcnt > 128) {
+	if (piobcnt > 128)
 		sbuf[2] = ipath_read_kreg64(
 			dd, dd->ipath_kregs->kr_sendbuffererror + 2);
+	if (piobcnt > 192)
 		sbuf[3] = ipath_read_kreg64(
 			dd, dd->ipath_kregs->kr_sendbuffererror + 3);
-	}
+	else
+		sbuf[3] = 0;
 
 	if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
 		int i;
@@ -254,24 +259,20 @@ void ipath_format_hwerrors(u64 hwerrs,
 }
 
 /* return the strings for the most common link states */
-static char *ib_linkstate(u32 linkstate)
+static char *ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
 {
 	char *ret;
+	u32 state;
 
-	switch (linkstate) {
-	case IPATH_IBSTATE_INIT:
+	state = ipath_ib_state(dd, ibcs);
+	if (state == dd->ib_init)
 		ret = "Init";
-		break;
-	case IPATH_IBSTATE_ARM:
+	else if (state == dd->ib_arm)
 		ret = "Arm";
-		break;
-	case IPATH_IBSTATE_ACTIVE:
+	else if (state == dd->ib_active)
 		ret = "Active";
-		break;
-	default:
+	else
 		ret = "Down";
-	}
-
 	return ret;
 }
 
@@ -286,103 +287,172 @@ void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev)
 }
 
 static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
-				     ipath_err_t errs, int noprint)
+				     ipath_err_t errs)
 {
-	u64 val;
-	u32 ltstate, lstate;
+	u32 ltstate, lstate, ibstate, lastlstate;
+	u32 init = dd->ib_init;
+	u32 arm = dd->ib_arm;
+	u32 active = dd->ib_active;
+	const u64 ibcs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
+
+	lstate = ipath_ib_linkstate(dd, ibcs); /* linkstate */
+	ibstate = ipath_ib_state(dd, ibcs);
+	/* linkstate at last interrupt */
+	lastlstate = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
+	ltstate = ipath_ib_linktrstate(dd, ibcs); /* linktrainingtate */
 
 	/*
-	 * even if diags are enabled, we want to notice LINKINIT, etc.
-	 * We just don't want to change the LED state, or
-	 * dd->ipath_kregs->kr_ibcctrl
+	 * Since going into a recovery state causes the link state to go
+	 * down and since recovery is transitory, it is better if we "miss"
+	 * ever seeing the link training state go into recovery (i.e.,
+	 * ignore this transition for link state special handling purposes)
+	 * without even updating ipath_lastibcstat.
 	 */
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-	lstate = val & IPATH_IBSTATE_MASK;
+	if ((ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN) ||
+	    (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT) ||
+	    (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERIDLE))
+		goto done;
 
 	/*
-	 * this is confusing enough when it happens that I want to always put it
-	 * on the console and in the logs.  If it was a requested state change,
-	 * we'll have already cleared the flags, so we won't print this warning
+	 * if linkstate transitions into INIT from any of the various down
+	 * states, or if it transitions from any of the up (INIT or better)
+	 * states into any of the down states (except link recovery), then
+	 * call the chip-specific code to take appropriate actions.
 	 */
-	if ((lstate != IPATH_IBSTATE_ARM && lstate != IPATH_IBSTATE_ACTIVE)
-	    && (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
-		dev_info(&dd->pcidev->dev, "Link state changed from %s to %s\n",
-			 (dd->ipath_flags & IPATH_LINKARMED) ? "ARM" : "ACTIVE",
-			 ib_linkstate(lstate));
-		/*
-		 * Flush all queued sends when link went to DOWN or INIT,
-		 * to be sure that they don't block SMA and other MAD packets
-		 */
-		ipath_cancel_sends(dd, 1);
-	}
-	else if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
-	    lstate == IPATH_IBSTATE_ACTIVE) {
-		/*
-		 * only print at SMA if there is a change, debug if not
-		 * (sometimes we want to know that, usually not).
-		 */
-		if (lstate == ((unsigned) dd->ipath_lastibcstat
-			       & IPATH_IBSTATE_MASK)) {
-			ipath_dbg("Status change intr but no change (%s)\n",
-				  ib_linkstate(lstate));
-		}
-		else
-			ipath_cdbg(VERBOSE, "Unit %u link state %s, last "
-				   "was %s\n", dd->ipath_unit,
-				   ib_linkstate(lstate),
-				   ib_linkstate((unsigned)
-						dd->ipath_lastibcstat
-						& IPATH_IBSTATE_MASK));
-	}
-	else {
-		lstate = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
-		if (lstate == IPATH_IBSTATE_INIT ||
-		    lstate == IPATH_IBSTATE_ARM ||
-		    lstate == IPATH_IBSTATE_ACTIVE)
-			ipath_cdbg(VERBOSE, "Unit %u link state down"
-				   " (state 0x%x), from %s\n",
-				   dd->ipath_unit,
-				   (u32)val & IPATH_IBSTATE_MASK,
-				   ib_linkstate(lstate));
-		else
-			ipath_cdbg(VERBOSE, "Unit %u link state changed "
-				   "to 0x%x from down (%x)\n",
-				   dd->ipath_unit, (u32) val, lstate);
+	if (lstate >= INFINIPATH_IBCS_L_STATE_INIT &&
+	    lastlstate == INFINIPATH_IBCS_L_STATE_DOWN) {
+		/* transitioned to UP */
+		if (dd->ipath_f_ib_updown(dd, 1, ibcs)) {
+			/* link came up, so we must no longer be disabled */
+			dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
+			ipath_cdbg(LINKVERB, "LinkUp handled, skipped\n");
+			goto skip_ibchange; /* chip-code handled */
+		}
+	} else if ((lastlstate >= INFINIPATH_IBCS_L_STATE_INIT ||
+		    (dd->ipath_flags & IPATH_IB_FORCE_NOTIFY)) &&
+		   ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT &&
+		   ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
+		int handled;
+		handled = dd->ipath_f_ib_updown(dd, 0, ibcs);
+		dd->ipath_flags &= ~IPATH_IB_FORCE_NOTIFY;
+		if (handled) {
+			ipath_cdbg(LINKVERB, "LinkDown handled, skipped\n");
+			goto skip_ibchange; /* chip-code handled */
+		}
+	}
+
+	/*
+	 * Significant enough to always print and get into logs, if it was
+	 * unexpected.  If it was a requested state change, we'll have
+	 * already cleared the flags, so we won't print this warning
+	 */
+	if ((ibstate != arm && ibstate != active) &&
+	    (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
+		dev_info(&dd->pcidev->dev, "Link state changed from %s "
+			 "to %s\n", (dd->ipath_flags & IPATH_LINKARMED) ?
+			 "ARM" : "ACTIVE", ib_linkstate(dd, ibcs));
 	}
-	ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
-		INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
-	lstate = (val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) &
-		INFINIPATH_IBCS_LINKSTATE_MASK;
 
 	if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
 	    ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
-		u32 last_ltstate;
-
+		u32 lastlts;
+		lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
 		/*
-		 * Ignore cycling back and forth from Polling.Active
-		 * to Polling.Quiet while waiting for the other end of
-		 * the link to come up. We will cycle back and forth
-		 * between them if no cable is plugged in,
-		 * the other device is powered off or disabled, etc.
+		 * Ignore cycling back and forth from Polling.Active to
+		 * Polling.Quiet while waiting for the other end of the link
+		 * to come up, except to try and decide if we are connected
+		 * to a live IB device or not.  We will cycle back and
+		 * forth between them if no cable is plugged in, the other
+		 * device is powered off or disabled, etc.
 		 */
-		last_ltstate = (dd->ipath_lastibcstat >>
-				INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT)
-			& INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
-		if (last_ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE
-		    || last_ltstate ==
-		    INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
-			if (dd->ipath_ibpollcnt > 40) {
+		if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
+		    lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
+			if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
+			    (++dd->ipath_ibpollcnt == 40)) {
 				dd->ipath_flags |= IPATH_NOCABLE;
 				*dd->ipath_statusp |=
 					IPATH_STATUS_IB_NOCABLE;
-			} else
-				dd->ipath_ibpollcnt++;
+				ipath_cdbg(LINKVERB, "Set NOCABLE\n");
+			}
+			ipath_cdbg(LINKVERB, "POLL change to %s (%x)\n",
+				   ipath_ibcstatus_str[ltstate], ibstate);
 			goto skip_ibchange;
 		}
 	}
-	dd->ipath_ibpollcnt = 0;	/* some state other than 2 or 3 */
+
+	dd->ipath_ibpollcnt = 0; /* not poll*, now */
 	ipath_stats.sps_iblink++;
-	if (ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
+
+	if (ibstate != init && dd->ipath_lastlinkrecov && ipath_linkrecovery) {
+		u64 linkrecov;
+		linkrecov = ipath_snap_cntr(dd,
+			dd->ipath_cregs->cr_iblinkerrrecovcnt);
+		if (linkrecov != dd->ipath_lastlinkrecov) {
+			ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
+				  ibcs, ib_linkstate(dd, ibcs),
+				  ipath_ibcstatus_str[ltstate],
+				  linkrecov);
+			/* and no more until active again */
+			dd->ipath_lastlinkrecov = 0;
+			ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
+			goto skip_ibchange;
+		}
+	}
+
+	if (ibstate == init || ibstate == arm || ibstate == active) {
+		*dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
+		if (ibstate == init || ibstate == arm) {
+			*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
+			if (dd->ipath_flags & IPATH_LINKACTIVE)
+				signal_ib_event(dd, IB_EVENT_PORT_ERR);
+		}
+		if (ibstate == arm) {
+			dd->ipath_flags |= IPATH_LINKARMED;
+			dd->ipath_flags &= ~(IPATH_LINKUNK |
+				IPATH_LINKINIT | IPATH_LINKDOWN |
+				IPATH_LINKACTIVE | IPATH_NOCABLE);
+			ipath_hol_down(dd);
+		} else if (ibstate == init) {
+			/*
+			 * set INIT and DOWN.  Down is checked by
+			 * most of the other code, but INIT is
+			 * useful to know in a few places.
+			 */
+			dd->ipath_flags |= IPATH_LINKINIT |
+				IPATH_LINKDOWN;
+			dd->ipath_flags &= ~(IPATH_LINKUNK |
+				IPATH_LINKARMED | IPATH_LINKACTIVE |
+				IPATH_NOCABLE);
+			ipath_hol_down(dd);
+		} else {  /* active */
+			dd->ipath_lastlinkrecov = ipath_snap_cntr(dd,
+				dd->ipath_cregs->cr_iblinkerrrecovcnt);
+			*dd->ipath_statusp |=
+				IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
+			dd->ipath_flags |= IPATH_LINKACTIVE;
+			dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
+				| IPATH_LINKDOWN | IPATH_LINKARMED |
+				IPATH_NOCABLE);
+			if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+				ipath_restart_sdma(dd);
+			signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
+			/* LED active not handled in chip _f_updown */
+			dd->ipath_f_setextled(dd, lstate, ltstate);
+			ipath_hol_up(dd);
+		}
+
+		/*
+		 * print after we've already done the work, so as not to
+		 * delay the state changes and notifications, for debugging
+		 */
+		if (lstate == lastlstate)
+			ipath_cdbg(LINKVERB, "Unchanged from last: %s "
+				   "(%x)\n", ib_linkstate(dd, ibcs), ibstate);
+		else
+			ipath_cdbg(VERBOSE, "Unit %u: link up to %s %s (%x)\n",
+				   dd->ipath_unit, ib_linkstate(dd, ibcs),
+				   ipath_ibcstatus_str[ltstate], ibstate);
+	} else { /* down */
 		if (dd->ipath_flags & IPATH_LINKACTIVE)
 			signal_ib_event(dd, IB_EVENT_PORT_ERR);
 		dd->ipath_flags |= IPATH_LINKDOWN;
@@ -391,69 +461,28 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
 				     IPATH_LINKARMED);
 		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
 		dd->ipath_lli_counter = 0;
-		if (!noprint) {
-			if (((dd->ipath_lastibcstat >>
-			      INFINIPATH_IBCS_LINKSTATE_SHIFT) &
-			     INFINIPATH_IBCS_LINKSTATE_MASK)
-			    == INFINIPATH_IBCS_L_STATE_ACTIVE)
-				/* if from up to down be more vocal */
-				ipath_cdbg(VERBOSE,
-					   "Unit %u link now down (%s)\n",
-					   dd->ipath_unit,
-					   ipath_ibcstatus_str[ltstate]);
-			else
-				ipath_cdbg(VERBOSE, "Unit %u link is "
-					   "down (%s)\n", dd->ipath_unit,
-					   ipath_ibcstatus_str[ltstate]);
-		}
 
-		dd->ipath_f_setextled(dd, lstate, ltstate);
-	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ACTIVE) {
-		dd->ipath_flags |= IPATH_LINKACTIVE;
-		dd->ipath_flags &=
-			~(IPATH_LINKUNK | IPATH_LINKINIT | IPATH_LINKDOWN |
-			  IPATH_LINKARMED | IPATH_NOCABLE);
-		*dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
-		*dd->ipath_statusp |=
-			IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
-		dd->ipath_f_setextled(dd, lstate, ltstate);
-		signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
-	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) {
-		if (dd->ipath_flags & IPATH_LINKACTIVE)
-			signal_ib_event(dd, IB_EVENT_PORT_ERR);
-		/*
-		 * set INIT and DOWN.  Down is checked by most of the other
-		 * code, but INIT is useful to know in a few places.
-		 */
-		dd->ipath_flags |= IPATH_LINKINIT | IPATH_LINKDOWN;
-		dd->ipath_flags &=
-			~(IPATH_LINKUNK | IPATH_LINKACTIVE | IPATH_LINKARMED
-			  | IPATH_NOCABLE);
-		*dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
-					| IPATH_STATUS_IB_READY);
-		dd->ipath_f_setextled(dd, lstate, ltstate);
-	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ARM) {
-		if (dd->ipath_flags & IPATH_LINKACTIVE)
-			signal_ib_event(dd, IB_EVENT_PORT_ERR);
-		dd->ipath_flags |= IPATH_LINKARMED;
-		dd->ipath_flags &=
-			~(IPATH_LINKUNK | IPATH_LINKDOWN | IPATH_LINKINIT |
-			  IPATH_LINKACTIVE | IPATH_NOCABLE);
-		*dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
-					| IPATH_STATUS_IB_READY);
-		dd->ipath_f_setextled(dd, lstate, ltstate);
-	} else {
-		if (!noprint)
-			ipath_dbg("IBstatuschange unit %u: %s (%x)\n",
-				  dd->ipath_unit,
-				  ipath_ibcstatus_str[ltstate], ltstate);
+		if (lastlstate != INFINIPATH_IBCS_L_STATE_DOWN)
+			ipath_cdbg(VERBOSE, "Unit %u link state down "
+				   "(state 0x%x), from %s\n",
+				   dd->ipath_unit, lstate,
+				   ib_linkstate(dd, dd->ipath_lastibcstat));
+		else
+			ipath_cdbg(LINKVERB, "Unit %u link state changed "
+				   "to %s (0x%x) from down (%x)\n",
+				   dd->ipath_unit,
+				   ipath_ibcstatus_str[ltstate],
+				   ibstate, lastlstate);
 	}
+
 skip_ibchange:
-	dd->ipath_lastibcstat = val;
+	dd->ipath_lastibcstat = ibcs;
+done:
+	return;
 }
 
 static void handle_supp_msgs(struct ipath_devdata *dd,
-			     unsigned supp_msgs, char *msg, int msgsz)
+			     unsigned supp_msgs, char *msg, u32 msgsz)
 {
 	/*
 	 * Print the message unless it's ibc status change only, which
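Editor's note: the rewritten handle_e_ibstatuschanged() reads kr_ibcstatus once into ibcs and decodes everything (logical link state, IB state, training state) through per-chip shift/mask helpers, instead of open-coding INFINIPATH_IBCS_* shifts against repeated register reads. A runnable sketch of that decode pattern; the field positions below are invented, the driver gets them from per-chip data:

	#include <stdint.h>
	#include <stdio.h>

	#define FIELD(reg, shift, mask) (((reg) >> (shift)) & (mask))

	int main(void)
	{
		const uint64_t ibcs = 0x35;	/* pretend kr_ibcstatus snapshot */
		uint32_t lstate  = FIELD(ibcs, 0, 0xf);	/* logical link state */
		uint32_t ltstate = FIELD(ibcs, 4, 0xf);	/* link training state */

		printf("lstate %u, ltstate %u\n", (unsigned) lstate,
		       (unsigned) ltstate);
		return 0;
	}

Caching the snapshot also lets the handler compare against the previous value (ipath_lastibcstat) without racing a second register read.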
@@ -461,12 +490,19 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
 	 */
 	if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
 		int iserr;
-		iserr = ipath_decode_err(msg, msgsz,
+		ipath_err_t mask;
+		iserr = ipath_decode_err(dd, msg, msgsz,
 					 dd->ipath_lasterror &
 					 ~INFINIPATH_E_IBSTATUSCHANGED);
-		if (dd->ipath_lasterror &
-		    ~(INFINIPATH_E_RRCVEGRFULL |
-		      INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
+
+		mask = INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
+			INFINIPATH_E_PKTERRS | INFINIPATH_E_SDMADISABLED;
+
+		/* if we're in debug, then don't mask SDMADISABLED msgs */
+		if (ipath_debug & __IPATH_DBG)
+			mask &= ~INFINIPATH_E_SDMADISABLED;
+
+		if (dd->ipath_lasterror & ~mask)
 			ipath_dev_err(dd, "Suppressed %u messages for "
 				      "fast-repeating errors (%s) (%llx)\n",
 				      supp_msgs, msg,
@@ -493,7 +529,7 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
 
 static unsigned handle_frequent_errors(struct ipath_devdata *dd,
 				       ipath_err_t errs, char *msg,
-				       int msgsz, int *noprint)
+				       u32 msgsz, int *noprint)
 {
 	unsigned long nc;
 	static unsigned long nextmsg_time;
@@ -523,19 +559,125 @@ static unsigned handle_frequent_errors(struct ipath_devdata *dd,
 	return supp_msgs;
 }
 
+static void handle_sdma_errors(struct ipath_devdata *dd, ipath_err_t errs)
+{
+	unsigned long flags;
+	int expected;
+
+	if (ipath_debug & __IPATH_DBG) {
+		char msg[128];
+		ipath_decode_err(dd, msg, sizeof msg, errs &
+			INFINIPATH_E_SDMAERRS);
+		ipath_dbg("errors %lx (%s)\n", (unsigned long)errs, msg);
+	}
+	if (ipath_debug & __IPATH_VERBDBG) {
+		unsigned long tl, hd, status, lengen;
+		tl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
+		hd = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
+		status = ipath_read_kreg64(dd
+			, dd->ipath_kregs->kr_senddmastatus);
+		lengen = ipath_read_kreg64(dd,
+			dd->ipath_kregs->kr_senddmalengen);
+		ipath_cdbg(VERBOSE, "sdma tl 0x%lx hd 0x%lx status 0x%lx "
+			"lengen 0x%lx\n", tl, hd, status, lengen);
+	}
+
+	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+	__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
+	expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
+	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+	if (!expected)
+		ipath_cancel_sends(dd, 1);
+}
+
+static void handle_sdma_intr(struct ipath_devdata *dd, u64 istat)
+{
+	unsigned long flags;
+	int expected;
+
+	if ((istat & INFINIPATH_I_SDMAINT) &&
+	    !test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+		ipath_sdma_intr(dd);
+
+	if (istat & INFINIPATH_I_SDMADISABLED) {
+		expected = test_bit(IPATH_SDMA_ABORTING,
+			&dd->ipath_sdma_status);
+		ipath_dbg("%s SDmaDisabled intr\n",
+			expected ? "expected" : "unexpected");
+		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+		__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
+		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+		if (!expected)
+			ipath_cancel_sends(dd, 1);
+		if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+			tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
+	}
+}
+
+static int handle_hdrq_full(struct ipath_devdata *dd)
+{
+	int chkerrpkts = 0;
+	u32 hd, tl;
+	u32 i;
+
+	ipath_stats.sps_hdrqfull++;
+	for (i = 0; i < dd->ipath_cfgports; i++) {
+		struct ipath_portdata *pd = dd->ipath_pd[i];
+
+		if (i == 0) {
+			/*
+			 * For kernel receive queues, we just want to know
+			 * if there are packets in the queue that we can
+			 * process.
+			 */
+			if (pd->port_head != ipath_get_hdrqtail(pd))
+				chkerrpkts |= 1 << i;
+			continue;
+		}
+
+		/* Skip if user context is not open */
+		if (!pd || !pd->port_cnt)
+			continue;
+
+		/* Don't report the same point multiple times. */
+		if (dd->ipath_flags & IPATH_NODMA_RTAIL)
+			tl = ipath_read_ureg32(dd, ur_rcvhdrtail, i);
+		else
+			tl = ipath_get_rcvhdrtail(pd);
+		if (tl == pd->port_lastrcvhdrqtail)
+			continue;
+
+		hd = ipath_read_ureg32(dd, ur_rcvhdrhead, i);
+		if (hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)) {
+			pd->port_lastrcvhdrqtail = tl;
+			pd->port_hdrqfull++;
+			/* flush hdrqfull so that poll() sees it */
+			wmb();
+			wake_up_interruptible(&pd->port_wait);
+		}
+	}
+
+	return chkerrpkts;
+}
+
 static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 {
 	char msg[128];
 	u64 ignore_this_time = 0;
-	int i, iserr = 0;
+	u64 iserr = 0;
 	int chkerrpkts = 0, noprint = 0;
 	unsigned supp_msgs;
 	int log_idx;
 
-	supp_msgs = handle_frequent_errors(dd, errs, msg, sizeof msg, &noprint);
+	/*
+	 * don't report errors that are masked, either at init
+	 * (not set in ipath_errormask), or temporarily (set in
+	 * ipath_maskederrs)
+	 */
+	errs &= dd->ipath_errormask & ~dd->ipath_maskederrs;
 
-	/* don't report errors that are masked */
-	errs &= ~dd->ipath_maskederrs;
+	supp_msgs = handle_frequent_errors(dd, errs, msg, (u32)sizeof msg,
+					   &noprint);
 
 	/* do these first, they are most important */
 	if (errs & INFINIPATH_E_HARDWARE) {
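Editor's note: the full-queue test that the new handle_hdrq_full() applies to each receive header queue is the classic circular-buffer condition: the queue is full when the head index is one entry past the tail, including the wrap case where head is 0 and tail sits on the last entry (ipath_hdrqlast). Runnable model of just that comparison (in the driver the values are header-queue offsets rather than simple counters, but the test is the same):

	#include <stdio.h>

	/* full when head has caught up to one past tail, with wraparound */
	static int hdrq_full(unsigned hd, unsigned tl, unsigned last)
	{
		return hd == tl + 1 || (hd == 0 && tl == last);
	}

	int main(void)
	{
		printf("%d\n", hdrq_full(5, 4, 7));	/* 1: head just past tail */
		printf("%d\n", hdrq_full(0, 7, 7));	/* 1: wrapped around */
		printf("%d\n", hdrq_full(3, 6, 7));	/* 0: space remains */
		return 0;
	}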
@@ -550,6 +692,9 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 		}
 	}
 
+	if (errs & INFINIPATH_E_SDMAERRS)
+		handle_sdma_errors(dd, errs);
+
 	if (!noprint && (errs & ~dd->ipath_e_bitsextant))
 		ipath_dev_err(dd, "error interrupt with unknown errors "
 			      "%llx set\n", (unsigned long long)
@@ -580,18 +725,19 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 	 * ones on this particular interrupt, which also isn't great
 	 */
 	dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
+
 	dd->ipath_errormask &= ~dd->ipath_maskederrs;
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
 			 dd->ipath_errormask);
-	s_iserr = ipath_decode_err(msg, sizeof msg,
+	s_iserr = ipath_decode_err(dd, msg, sizeof msg,
 				   dd->ipath_maskederrs);
 
 	if (dd->ipath_maskederrs &
 	    ~(INFINIPATH_E_RRCVEGRFULL |
 	      INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
 		ipath_dev_err(dd, "Temporarily disabling "
 			"error(s) %llx reporting; too frequent (%s)\n",
-			(unsigned long long)dd->ipath_maskederrs,
+			(unsigned long long) dd->ipath_maskederrs,
 			msg);
 	else {
 		/*
@@ -633,26 +779,43 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 				    INFINIPATH_E_IBSTATUSCHANGED);
 	}
 
-	/* likely due to cancel, so suppress */
+	if (errs & INFINIPATH_E_SENDSPECIALTRIGGER) {
+		dd->ipath_spectriggerhit++;
+		ipath_dbg("%lu special trigger hits\n",
+			  dd->ipath_spectriggerhit);
+	}
+
+	/* likely due to cancel; so suppress message unless verbose */
 	if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&
 	    dd->ipath_lastcancel > jiffies) {
-		ipath_dbg("Suppressed armlaunch/spktlen after error send cancel\n");
+		/* armlaunch takes precedence; it often causes both. */
+		ipath_cdbg(VERBOSE,
+			"Suppressed %s error (%llx) after sendbuf cancel\n",
+			(errs & INFINIPATH_E_SPIOARMLAUNCH) ?
+			"armlaunch" : "sendpktlen", (unsigned long long)errs);
 		errs &= ~(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SPKTLEN);
 	}
 
 	if (!errs)
 		return 0;
 
-	if (!noprint)
+	if (!noprint) {
+		ipath_err_t mask;
 		/*
-		 * the ones we mask off are handled specially below or above
+		 * The ones we mask off are handled specially below
+		 * or above.  Also mask SDMADISABLED by default as it
+		 * is too chatty.
 		 */
-		ipath_decode_err(msg, sizeof msg,
-				 errs & ~(INFINIPATH_E_IBSTATUSCHANGED |
-					  INFINIPATH_E_RRCVEGRFULL |
-					  INFINIPATH_E_RRCVHDRFULL |
-					  INFINIPATH_E_HARDWARE));
-	else
+		mask = INFINIPATH_E_IBSTATUSCHANGED |
+			INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
+			INFINIPATH_E_HARDWARE | INFINIPATH_E_SDMADISABLED;
+
+		/* if we're in debug, then don't mask SDMADISABLED msgs */
+		if (ipath_debug & __IPATH_DBG)
+			mask &= ~INFINIPATH_E_SDMADISABLED;
+
+		ipath_decode_err(dd, msg, sizeof msg, errs & ~mask);
+	} else
 		/* so we don't need if (!noprint) at strlcat's below */
 		*msg = 0;
 
@@ -677,40 +840,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 	 * fast_stats, no more than every 5 seconds, user ports get printed
 	 * on close
 	 */
-	if (errs & INFINIPATH_E_RRCVHDRFULL) {
-		u32 hd, tl;
-		ipath_stats.sps_hdrqfull++;
-		for (i = 0; i < dd->ipath_cfgports; i++) {
-			struct ipath_portdata *pd = dd->ipath_pd[i];
-			if (i == 0) {
-				hd = pd->port_head;
-				tl = (u32) le64_to_cpu(
-					*dd->ipath_hdrqtailptr);
-			} else if (pd && pd->port_cnt &&
-				   pd->port_rcvhdrtail_kvaddr) {
-				/*
-				 * don't report same point multiple times,
-				 * except kernel
-				 */
-				tl = *(u64 *) pd->port_rcvhdrtail_kvaddr;
-				if (tl == pd->port_lastrcvhdrqtail)
-					continue;
-				hd = ipath_read_ureg32(dd, ur_rcvhdrhead,
-						       i);
-			} else
-				continue;
-			if (hd == (tl + 1) ||
-			    (!hd && tl == dd->ipath_hdrqlast)) {
-				if (i == 0)
-					chkerrpkts = 1;
-				pd->port_lastrcvhdrqtail = tl;
-				pd->port_hdrqfull++;
-				/* flush hdrqfull so that poll() sees it */
-				wmb();
-				wake_up_interruptible(&pd->port_wait);
-			}
-		}
-	}
+	if (errs & INFINIPATH_E_RRCVHDRFULL)
+		chkerrpkts |= handle_hdrq_full(dd);
 	if (errs & INFINIPATH_E_RRCVEGRFULL) {
 		struct ipath_portdata *pd = dd->ipath_pd[0];
 
@@ -721,9 +852,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 	 * vs user)
 	 */
 	ipath_stats.sps_etidfull++;
-	if (pd->port_head !=
-	    (u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
-		chkerrpkts = 1;
+	if (pd->port_head != ipath_get_hdrqtail(pd))
+		chkerrpkts |= 1;
 	}
 
 	/*
@@ -741,16 +871,13 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 		dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
 				     | IPATH_LINKARMED | IPATH_LINKACTIVE);
 		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-		if (!noprint) {
-			u64 st = ipath_read_kreg64(
-				dd, dd->ipath_kregs->kr_ibcstatus);
 
-			ipath_dbg("Lost link, link now down (%s)\n",
-				  ipath_ibcstatus_str[st & 0xf]);
-		}
+		ipath_dbg("Lost link, link now down (%s)\n",
+			  ipath_ibcstatus_str[ipath_read_kreg64(dd,
+			  dd->ipath_kregs->kr_ibcstatus) & 0xf]);
 	}
 	if (errs & INFINIPATH_E_IBSTATUSCHANGED)
-		handle_e_ibstatuschanged(dd, errs, noprint);
+		handle_e_ibstatuschanged(dd, errs);
 
 	if (errs & INFINIPATH_E_RESET) {
 		if (!noprint)
@@ -765,9 +892,6 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 	if (!noprint && *msg) {
 		if (iserr)
 			ipath_dev_err(dd, "%s error\n", msg);
-		else
-			dev_info(&dd->pcidev->dev, "%s packet problems\n",
-				msg);
 	}
 	if (dd->ipath_state_wanted & dd->ipath_flags) {
 		ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
@@ -779,7 +903,6 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 	return chkerrpkts;
 }
 
-
 /*
  * try to cleanup as much as possible for anything that might have gone
  * wrong while in freeze mode, such as pio buffers being written by user
@@ -796,8 +919,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 void ipath_clear_freeze(struct ipath_devdata *dd)
 {
 	int i, im;
-	__le64 val;
-	unsigned long flags;
+	u64 val;
 
 	/* disable error interrupts, to avoid confusion */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
@@ -816,14 +938,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
 			 dd->ipath_control);
 
 	/* ensure pio avail updates continue */
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-		 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-		 dd->ipath_sendctrl);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+	ipath_force_pio_avail_update(dd);
 
 	/*
 	 * We just enabled pioavailupdate, so dma copy is almost certainly
@@ -831,10 +946,13 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
 	 */
 	for (i = 0; i < dd->ipath_pioavregs; i++) {
 		/* deal with 6110 chip bug */
-		im = i > 3 ? i ^ 1 : i;
+		im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
+			i ^ 1 : i;
 		val = ipath_read_kreg64(dd, (0x1000 / sizeof(u64)) + im);
-		dd->ipath_pioavailregs_dma[i] = dd->ipath_pioavailshadow[i]
-			= le64_to_cpu(val);
+		dd->ipath_pioavailregs_dma[i] = cpu_to_le64(val);
+		dd->ipath_pioavailshadow[i] = val |
+			(~dd->ipath_pioavailkernel[i] <<
+			INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT);
 	}
 
 	/*
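Editor's note: the rebuilt shadow above folds in the new ipath_pioavailkernel[] bitmap: buffers not currently owned by the kernel are forced to look busy in ipath_pioavailshadow[], so the kernel's PIO allocator will skip them. A very loose model of that OR-in-busy-bits step; the real layout packs two status bits per buffer and the busy shift is chip-defined, so the single-bit shift here is purely illustrative:

	#include <stdio.h>

	#define BUSY_SHIFT 1	/* invented; real value is chip-specific */

	int main(void)
	{
		unsigned long hw_avail   = 0x0fUL;	/* chip-reported free bits */
		unsigned long kernel_own = 0x05UL;	/* buffers kernel may use */

		/* anything not kernel-owned is made to look busy in the shadow */
		unsigned long shadow = hw_avail | (~kernel_own << BUSY_SHIFT);

		printf("shadow %#lx\n", shadow);
		return 0;
	}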
@@ -950,7 +1068,7 @@ set:
 	 * process was waiting for a packet to arrive, and didn't want
 	 * to poll
 	 */
-static void handle_urcv(struct ipath_devdata *dd, u32 istat)
+static void handle_urcv(struct ipath_devdata *dd, u64 istat)
 {
 	u64 portr;
 	int i;
@@ -966,12 +1084,13 @@ static void handle_urcv(struct ipath_devdata *dd, u64 istat)
 	 * and ipath_poll_next()...
 	 */
 	rmb();
-	portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) &
-		 dd->ipath_i_rcvavail_mask)
-		| ((istat >> INFINIPATH_I_RCVURG_SHIFT) &
-		   dd->ipath_i_rcvurg_mask);
+	portr = ((istat >> dd->ipath_i_rcvavail_shift) &
+		 dd->ipath_i_rcvavail_mask) |
+		((istat >> dd->ipath_i_rcvurg_shift) &
+		 dd->ipath_i_rcvurg_mask);
 	for (i = 1; i < dd->ipath_cfgports; i++) {
 		struct ipath_portdata *pd = dd->ipath_pd[i];
+
 		if (portr & (1 << i) && pd && pd->port_cnt) {
 			if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
 					       &pd->port_flag)) {
@@ -988,7 +1107,7 @@ static void handle_urcv(struct ipath_devdata *dd, u64 istat)
 	}
 	if (rcvdint) {
 		/* only want to take one interrupt, so turn off the rcv
-		 * interrupt for all the ports that we did the wakeup on
+		 * interrupt for all the ports that we set the rcv_waiting
 		 * (but never for kernel port)
 		 */
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
@@ -999,12 +1118,11 @@ static void handle_urcv(struct ipath_devdata *dd, u64 istat)
 irqreturn_t ipath_intr(int irq, void *data)
 {
 	struct ipath_devdata *dd = data;
-	u32 istat, chk0rcv = 0;
+	u64 istat, chk0rcv = 0;
 	ipath_err_t estat = 0;
 	irqreturn_t ret;
 	static unsigned unexpected = 0;
-	static const u32 port0rbits = (1U<<INFINIPATH_I_RCVAVAIL_SHIFT) |
-		(1U<<INFINIPATH_I_RCVURG_SHIFT);
+	u64 kportrbits;
 
 	ipath_stats.sps_ints++;
 
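Editor's note: with this change the receive-available and receive-urgent shifts come from per-chip fields (dd->ipath_i_rcvavail_shift and friends) rather than compile-time constants, since newer chips lay the interrupt status register out differently. The bit gathering itself, as done in handle_urcv(), collapses both groups into one attention bit per port. Runnable model with invented shifts and port count:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint64_t istat = 0x0000000500020000ULL; /* fake status */
		const unsigned avail_shift = 16, urg_shift = 32; /* invented */
		const uint64_t port_mask = 0x1f;	/* 5 ports */

		uint64_t portr = ((istat >> avail_shift) & port_mask) |
				 ((istat >> urg_shift) & port_mask);

		for (unsigned i = 1; i < 5; i++)  /* port 0 handled elsewhere */
			if (portr & (1ULL << i))
				printf("port %u needs attention\n", i);
		return 0;
	}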
@@ -1053,17 +1171,17 @@ irqreturn_t ipath_intr(int irq, void *data)
 
 	if (unlikely(istat & ~dd->ipath_i_bitsextant))
 		ipath_dev_err(dd,
-			      "interrupt with unknown interrupts %x set\n",
-			      istat & (u32) ~ dd->ipath_i_bitsextant);
-	else
-		ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);
+			      "interrupt with unknown interrupts %Lx set\n",
+			      istat & ~dd->ipath_i_bitsextant);
+	else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
+		ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n", istat);
 
-	if (unlikely(istat & INFINIPATH_I_ERROR)) {
+	if (istat & INFINIPATH_I_ERROR) {
 		ipath_stats.sps_errints++;
 		estat = ipath_read_kreg64(dd,
 					  dd->ipath_kregs->kr_errorstatus);
 		if (!estat)
-			dev_info(&dd->pcidev->dev, "error interrupt (%x), "
+			dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
 				 "but no error bits set!\n", istat);
 		else if (estat == -1LL)
 			/*
@@ -1073,9 +1191,7 @@ irqreturn_t ipath_intr(int irq, void *data)
 			ipath_dev_err(dd, "Read of error status failed "
 				      "(all bits set); ignoring\n");
 		else
-			if (handle_errors(dd, estat))
-				/* force calling ipath_kreceive() */
-				chk0rcv = 1;
+			chk0rcv |= handle_errors(dd, estat);
 	}
 
 	if (istat & INFINIPATH_I_GPIO) {
@@ -1093,8 +1209,7 @@ irqreturn_t ipath_intr(int irq, void *data)
 
 		gpiostatus = ipath_read_kreg32(
 			dd, dd->ipath_kregs->kr_gpio_status);
-		/* First the error-counter case.
-		 */
+		/* First the error-counter case. */
 		if ((gpiostatus & IPATH_GPIO_ERRINTR_MASK) &&
 		    (dd->ipath_flags & IPATH_GPIO_ERRINTRS)) {
 			/* want to clear the bits we see asserted. */
@@ -1156,7 +1271,6 @@ irqreturn_t ipath_intr(int irq, void *data)
 					      (u64) to_clear);
 		}
 	}
-	chk0rcv |= istat & port0rbits;
 
 	/*
 	 * Clear the interrupt bits we found set, unless they are receive
@@ -1169,22 +1283,25 @@ irqreturn_t ipath_intr(int irq, void *data)
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
 
 	/*
-	 * handle port0 receive before checking for pio buffers available,
-	 * since receives can overflow; piobuf waiters can afford a few
-	 * extra cycles, since they were waiting anyway, and user's waiting
-	 * for receive are at the bottom.
+	 * Handle kernel receive queues before checking for pio buffers
+	 * available since receives can overflow; piobuf waiters can afford
+	 * a few extra cycles, since they were waiting anyway, and user's
+	 * waiting for receive are at the bottom.
 	 */
-	if (chk0rcv) {
+	kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
+		(1ULL << dd->ipath_i_rcvurg_shift);
+	if (chk0rcv || (istat & kportrbits)) {
+		istat &= ~kportrbits;
 		ipath_kreceive(dd->ipath_pd[0]);
-		istat &= ~port0rbits;
 	}
 
-	if (istat & ((dd->ipath_i_rcvavail_mask <<
-		      INFINIPATH_I_RCVAVAIL_SHIFT)
-		     | (dd->ipath_i_rcvurg_mask <<
-			INFINIPATH_I_RCVURG_SHIFT)))
+	if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
+		     (dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
 		handle_urcv(dd, istat);
 
+	if (istat & (INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED))
+		handle_sdma_intr(dd, istat);
+
 	if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
 		unsigned long flags;
 
@@ -1195,7 +1312,10 @@ irqreturn_t ipath_intr(int irq, void *data)
 		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
-		handle_layer_pioavail(dd);
+		if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
+			handle_layer_pioavail(dd);
+		else
+			ipath_dbg("unexpected BUFAVAIL intr\n");
 	}
 
 	ret = IRQ_HANDLED;
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index ecf3f7ff7717..5863cbe99303 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -1,7 +1,7 @@
 #ifndef _IPATH_KERNEL_H
 #define _IPATH_KERNEL_H
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -42,6 +42,8 @@
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/scatterlist.h>
 #include <asm/io.h>
 #include <rdma/ib_verbs.h>
 
@@ -175,9 +177,13 @@ struct ipath_portdata {
 	u16 poll_type;
 	/* port rcvhdrq head offset */
 	u32 port_head;
+	/* receive packet sequence counter */
+	u32 port_seq_cnt;
 };
 
 struct sk_buff;
+struct ipath_sge_state;
+struct ipath_verbs_txreq;
 
 /*
  * control information for layered drivers
@@ -191,6 +197,40 @@ struct ipath_skbinfo {
 	dma_addr_t phys;
 };
 
+struct ipath_sdma_txreq {
+	int flags;
+	int sg_count;
+	union {
+		struct scatterlist *sg;
+		void *map_addr;
+	};
+	void (*callback)(void *, int);
+	void *callback_cookie;
+	int callback_status;
+	u16 start_idx;		/* sdma private */
+	u16 next_descq_idx;	/* sdma private */
+	struct list_head list;	/* sdma private */
+};
+
+struct ipath_sdma_desc {
+	__le64 qw[2];
+};
+
+#define IPATH_SDMA_TXREQ_F_USELARGEBUF	0x1
+#define IPATH_SDMA_TXREQ_F_HEADTOHOST	0x2
+#define IPATH_SDMA_TXREQ_F_INTREQ	0x4
+#define IPATH_SDMA_TXREQ_F_FREEBUF	0x8
+#define IPATH_SDMA_TXREQ_F_FREEDESC	0x10
+#define IPATH_SDMA_TXREQ_F_VL15		0x20
+
+#define IPATH_SDMA_TXREQ_S_OK		0
+#define IPATH_SDMA_TXREQ_S_SENDERROR	1
+#define IPATH_SDMA_TXREQ_S_ABORTED	2
+#define IPATH_SDMA_TXREQ_S_SHUTDOWN	3
+
+/* max dwords in small buffer packet */
+#define IPATH_SMALLBUF_DWORDS (dd->ipath_piosize2k >> 2)
+
 /*
  * Possible IB config parameters for ipath_f_get/set_ib_cfg()
  */
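Editor's note: the new SendDMA bookkeeping (see the ipath_devdata additions below) keeps monotonically increasing ipath_sdma_descq_added/removed counters alongside a fixed-size descriptor ring, so occupancy and free space fall out by subtraction. Whether the driver derives its u16 head/tail ring indices by modulo exactly as shown here is an assumption for illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint16_t descq_cnt = 256;		/* ring size */
		uint64_t added = 1000, removed = 870;	/* monotonic counters */

		uint64_t used  = added - removed;	/* descriptors in flight */
		uint64_t avail = descq_cnt - used;	/* room for new work */
		unsigned tail = added % descq_cnt;	/* next slot to fill */
		unsigned head = removed % descq_cnt;	/* next slot hw retires */

		printf("used %llu avail %llu head %u tail %u\n",
		       (unsigned long long) used, (unsigned long long) avail,
		       head, tail);
		return 0;
	}

Monotonic counters avoid the usual full-vs-empty ambiguity of equal head/tail indices, since used == 0 and used == descq_cnt are distinct.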
@@ -221,11 +261,6 @@ struct ipath_devdata {
221 unsigned long ipath_physaddr; 261 unsigned long ipath_physaddr;
222 /* base of memory alloced for ipath_kregbase, for free */ 262 /* base of memory alloced for ipath_kregbase, for free */
223 u64 *ipath_kregalloc; 263 u64 *ipath_kregalloc;
224 /*
225 * virtual address where port0 rcvhdrqtail updated for this unit.
226 * only written to by the chip, not the driver.
227 */
228 volatile __le64 *ipath_hdrqtailptr;
229 /* ipath_cfgports pointers */ 264 /* ipath_cfgports pointers */
230 struct ipath_portdata **ipath_pd; 265 struct ipath_portdata **ipath_pd;
231 /* sk_buffs used by port 0 eager receive queue */ 266 /* sk_buffs used by port 0 eager receive queue */
@@ -283,6 +318,7 @@ struct ipath_devdata {
283 /* per chip actions needed for IB Link up/down changes */ 318 /* per chip actions needed for IB Link up/down changes */
284 int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64); 319 int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);
285 320
321 unsigned ipath_lastegr_idx;
286 struct ipath_ibdev *verbs_dev; 322 struct ipath_ibdev *verbs_dev;
287 struct timer_list verbs_timer; 323 struct timer_list verbs_timer;
288 /* total dwords sent (summed from counter) */ 324 /* total dwords sent (summed from counter) */
@@ -309,6 +345,7 @@ struct ipath_devdata {
309 ipath_err_t ipath_lasthwerror; 345 ipath_err_t ipath_lasthwerror;
310 /* errors masked because they occur too fast */ 346 /* errors masked because they occur too fast */
311 ipath_err_t ipath_maskederrs; 347 ipath_err_t ipath_maskederrs;
348 u64 ipath_lastlinkrecov; /* link recoveries at last ACTIVE */
312 /* time in jiffies at which to re-enable maskederrs */ 349 /* time in jiffies at which to re-enable maskederrs */
313 unsigned long ipath_unmasktime; 350 unsigned long ipath_unmasktime;
314 /* count of egrfull errors, combined for all ports */ 351 /* count of egrfull errors, combined for all ports */
@@ -347,6 +384,7 @@ struct ipath_devdata {
347 u32 ipath_lastrpkts; 384 u32 ipath_lastrpkts;
348 /* pio bufs allocated per port */ 385 /* pio bufs allocated per port */
349 u32 ipath_pbufsport; 386 u32 ipath_pbufsport;
387 u32 ipath_pioupd_thresh; /* update threshold, some chips */
350 /* 388 /*
351 * number of ports configured as max; zero is set to number chip 389 * number of ports configured as max; zero is set to number chip
352 * supports, less gives more pio bufs/port, etc. 390 * supports, less gives more pio bufs/port, etc.
@@ -365,6 +403,7 @@ struct ipath_devdata {
365 * get to multiple devices 403 * get to multiple devices
366 */ 404 */
367 u32 ipath_lastpioindex; 405 u32 ipath_lastpioindex;
406 u32 ipath_lastpioindexl;
368 /* max length of freezemsg */ 407 /* max length of freezemsg */
369 u32 ipath_freezelen; 408 u32 ipath_freezelen;
370 /* 409 /*
@@ -381,6 +420,15 @@ struct ipath_devdata {
381 u32 ipath_pcibar0; 420 u32 ipath_pcibar0;
382 /* so we can rewrite it after a chip reset */ 421 /* so we can rewrite it after a chip reset */
383 u32 ipath_pcibar1; 422 u32 ipath_pcibar1;
423 u32 ipath_x1_fix_tries;
424 u32 ipath_autoneg_tries;
425 u32 serdes_first_init_done;
426
427 struct ipath_relock {
428 atomic_t ipath_relock_timer_active;
429 struct timer_list ipath_relock_timer;
430 unsigned int ipath_relock_interval; /* in jiffies */
431 } ipath_relock_singleton;
384 432
385 /* interrupt number */ 433 /* interrupt number */
386 int ipath_irq; 434 int ipath_irq;
@@ -403,7 +451,7 @@ struct ipath_devdata {
403 u64 __iomem *ipath_egrtidbase; 451 u64 __iomem *ipath_egrtidbase;
404 /* lock to workaround chip bug 9437 and others */ 452 /* lock to workaround chip bug 9437 and others */
405 spinlock_t ipath_kernel_tid_lock; 453 spinlock_t ipath_kernel_tid_lock;
406 spinlock_t ipath_tid_lock; 454 spinlock_t ipath_user_tid_lock;
407 spinlock_t ipath_sendctrl_lock; 455 spinlock_t ipath_sendctrl_lock;
408 456
409 /* 457 /*
@@ -422,11 +470,48 @@ struct ipath_devdata {
422 struct class_device *diag_class_dev; 470 struct class_device *diag_class_dev;
423 /* timer used to prevent stats overflow, error throttling, etc. */ 471 /* timer used to prevent stats overflow, error throttling, etc. */
424 struct timer_list ipath_stats_timer; 472 struct timer_list ipath_stats_timer;
473 /* timer to verify interrupts work, and fallback if possible */
474 struct timer_list ipath_intrchk_timer;
425 void *ipath_dummy_hdrq; /* used after port close */ 475 void *ipath_dummy_hdrq; /* used after port close */
426 dma_addr_t ipath_dummy_hdrq_phys; 476 dma_addr_t ipath_dummy_hdrq_phys;
427 477
478 /* SendDMA related entries */
479 spinlock_t ipath_sdma_lock;
480 u64 ipath_sdma_status;
481 unsigned long ipath_sdma_abort_jiffies;
482 unsigned long ipath_sdma_abort_intr_timeout;
483 unsigned long ipath_sdma_buf_jiffies;
484 struct ipath_sdma_desc *ipath_sdma_descq;
485 u64 ipath_sdma_descq_added;
486 u64 ipath_sdma_descq_removed;
487 int ipath_sdma_desc_nreserved;
488 u16 ipath_sdma_descq_cnt;
489 u16 ipath_sdma_descq_tail;
490 u16 ipath_sdma_descq_head;
491 u16 ipath_sdma_next_intr;
492 u16 ipath_sdma_reset_wait;
493 u8 ipath_sdma_generation;
494 struct tasklet_struct ipath_sdma_abort_task;
495 struct tasklet_struct ipath_sdma_notify_task;
496 struct list_head ipath_sdma_activelist;
497 struct list_head ipath_sdma_notifylist;
498 atomic_t ipath_sdma_vl15_count;
499 struct timer_list ipath_sdma_vl15_timer;
500
501 dma_addr_t ipath_sdma_descq_phys;
502 volatile __le64 *ipath_sdma_head_dma;
503 dma_addr_t ipath_sdma_head_phys;
504
428 unsigned long ipath_ureg_align; /* user register alignment */ 505 unsigned long ipath_ureg_align; /* user register alignment */
429 506
507 struct delayed_work ipath_autoneg_work;
508 wait_queue_head_t ipath_autoneg_wait;
509
510 /* HoL blocking / user app forward-progress state */
511 unsigned ipath_hol_state;
512 unsigned ipath_hol_next;
513 struct timer_list ipath_hol_timer;
514
430 /* 515 /*
431 * Shadow copies of registers; size indicates read access size. 516 * Shadow copies of registers; size indicates read access size.
 432 * Most of them are readonly, but some are write-only registers, 517 * Most of them are readonly, but some are write-only registers,
@@ -447,6 +532,8 @@ struct ipath_devdata {
447 * init time. 532 * init time.
448 */ 533 */
449 unsigned long ipath_pioavailshadow[8]; 534 unsigned long ipath_pioavailshadow[8];
535 /* bitmap of send buffers available for the kernel to use with PIO. */
536 unsigned long ipath_pioavailkernel[8];
450 /* shadow of kr_gpio_out, for rmw ops */ 537 /* shadow of kr_gpio_out, for rmw ops */
451 u64 ipath_gpio_out; 538 u64 ipath_gpio_out;
452 /* shadow the gpio mask register */ 539 /* shadow the gpio mask register */
@@ -472,6 +559,8 @@ struct ipath_devdata {
472 u64 ipath_intconfig; 559 u64 ipath_intconfig;
473 /* kr_sendpiobufbase value */ 560 /* kr_sendpiobufbase value */
474 u64 ipath_piobufbase; 561 u64 ipath_piobufbase;
562 /* kr_ibcddrctrl shadow */
563 u64 ipath_ibcddrctrl;
475 564
476 /* these are the "32 bit" regs */ 565 /* these are the "32 bit" regs */
477 566
@@ -488,7 +577,10 @@ struct ipath_devdata {
488 unsigned long ipath_rcvctrl; 577 unsigned long ipath_rcvctrl;
489 /* shadow kr_sendctrl */ 578 /* shadow kr_sendctrl */
490 unsigned long ipath_sendctrl; 579 unsigned long ipath_sendctrl;
 491 unsigned long ipath_lastcancel; /* to not count armlaunch after cancel */ 580 /* don't count armlaunch errors after a cancel */
581 unsigned long ipath_lastcancel;
582 /* count cases where special trigger was needed (double write) */
583 unsigned long ipath_spectriggerhit;
492 584
493 /* value we put in kr_rcvhdrcnt */ 585 /* value we put in kr_rcvhdrcnt */
494 u32 ipath_rcvhdrcnt; 586 u32 ipath_rcvhdrcnt;
@@ -510,6 +602,7 @@ struct ipath_devdata {
510 u32 ipath_piobcnt4k; 602 u32 ipath_piobcnt4k;
511 /* size in bytes of "4KB" PIO buffers */ 603 /* size in bytes of "4KB" PIO buffers */
512 u32 ipath_piosize4k; 604 u32 ipath_piosize4k;
 605 u32 ipath_pioreserved; /* buffers reserved for special in-kernel use */
513 /* kr_rcvegrbase value */ 606 /* kr_rcvegrbase value */
514 u32 ipath_rcvegrbase; 607 u32 ipath_rcvegrbase;
515 /* kr_rcvegrcnt value */ 608 /* kr_rcvegrcnt value */
@@ -546,10 +639,10 @@ struct ipath_devdata {
546 u32 ipath_init_ibmaxlen; 639 u32 ipath_init_ibmaxlen;
547 /* size of each rcvegrbuffer */ 640 /* size of each rcvegrbuffer */
548 u32 ipath_rcvegrbufsize; 641 u32 ipath_rcvegrbufsize;
 549 /* width (2,4,8,16,32) from HT config reg */ 642 /* localbus width (1,2,4,8,16,32) from config space */
550 u32 ipath_htwidth; 643 u32 ipath_lbus_width;
 551 /* HT speed (200,400,800,1000) from HT config */ 644 /* localbus speed (HT: 200,400,800,1000; PCIe: 2500) */
552 u32 ipath_htspeed; 645 u32 ipath_lbus_speed;
553 /* 646 /*
 554 * number of sequential ibcstatus changes for polling active/quiet 647 * number of sequential ibcstatus changes for polling active/quiet
555 * (i.e., link not coming up). 648 * (i.e., link not coming up).
@@ -573,21 +666,14 @@ struct ipath_devdata {
573 */ 666 */
574 u8 ipath_serial[16]; 667 u8 ipath_serial[16];
575 /* human readable board version */ 668 /* human readable board version */
576 u8 ipath_boardversion[80]; 669 u8 ipath_boardversion[96];
670 u8 ipath_lbus_info[32]; /* human readable localbus info */
577 /* chip major rev, from ipath_revision */ 671 /* chip major rev, from ipath_revision */
578 u8 ipath_majrev; 672 u8 ipath_majrev;
579 /* chip minor rev, from ipath_revision */ 673 /* chip minor rev, from ipath_revision */
580 u8 ipath_minrev; 674 u8 ipath_minrev;
581 /* board rev, from ipath_revision */ 675 /* board rev, from ipath_revision */
582 u8 ipath_boardrev; 676 u8 ipath_boardrev;
583
584 u8 ipath_r_portenable_shift;
585 u8 ipath_r_intravail_shift;
586 u8 ipath_r_tailupd_shift;
587 u8 ipath_r_portcfg_shift;
588
589 /* unit # of this chip, if present */
590 int ipath_unit;
591 /* saved for restore after reset */ 677 /* saved for restore after reset */
592 u8 ipath_pci_cacheline; 678 u8 ipath_pci_cacheline;
593 /* LID mask control */ 679 /* LID mask control */
@@ -603,6 +689,14 @@ struct ipath_devdata {
603 /* Rx Polarity inversion (compensate for ~tx on partner) */ 689 /* Rx Polarity inversion (compensate for ~tx on partner) */
604 u8 ipath_rx_pol_inv; 690 u8 ipath_rx_pol_inv;
605 691
692 u8 ipath_r_portenable_shift;
693 u8 ipath_r_intravail_shift;
694 u8 ipath_r_tailupd_shift;
695 u8 ipath_r_portcfg_shift;
696
697 /* unit # of this chip, if present */
698 int ipath_unit;
699
606 /* local link integrity counter */ 700 /* local link integrity counter */
607 u32 ipath_lli_counter; 701 u32 ipath_lli_counter;
608 /* local link integrity errors */ 702 /* local link integrity errors */
@@ -617,9 +711,6 @@ struct ipath_devdata {
617 u32 ipath_overrun_thresh_errs; 711 u32 ipath_overrun_thresh_errs;
618 u32 ipath_lli_errs; 712 u32 ipath_lli_errs;
619 713
620 /* status check work */
621 struct delayed_work status_work;
622
623 /* 714 /*
624 * Not all devices managed by a driver instance are the same 715 * Not all devices managed by a driver instance are the same
625 * type, so these fields must be per-device. 716 * type, so these fields must be per-device.
@@ -632,8 +723,8 @@ struct ipath_devdata {
632 * Below should be computable from number of ports, 723 * Below should be computable from number of ports,
633 * since they are never modified. 724 * since they are never modified.
634 */ 725 */
635 u32 ipath_i_rcvavail_mask; 726 u64 ipath_i_rcvavail_mask;
636 u32 ipath_i_rcvurg_mask; 727 u64 ipath_i_rcvurg_mask;
637 u16 ipath_i_rcvurg_shift; 728 u16 ipath_i_rcvurg_shift;
638 u16 ipath_i_rcvavail_shift; 729 u16 ipath_i_rcvavail_shift;
639 730
@@ -641,8 +732,9 @@ struct ipath_devdata {
641 * Register bits for selecting i2c direction and values, used for 732 * Register bits for selecting i2c direction and values, used for
642 * I2C serial flash. 733 * I2C serial flash.
643 */ 734 */
644 u16 ipath_gpio_sda_num; 735 u8 ipath_gpio_sda_num;
645 u16 ipath_gpio_scl_num; 736 u8 ipath_gpio_scl_num;
737 u8 ipath_i2c_chain_type;
646 u64 ipath_gpio_sda; 738 u64 ipath_gpio_sda;
647 u64 ipath_gpio_scl; 739 u64 ipath_gpio_scl;
648 740
@@ -703,13 +795,51 @@ struct ipath_devdata {
703 /* interrupt mitigation reload register info */ 795 /* interrupt mitigation reload register info */
704 u16 ipath_jint_idle_ticks; /* idle clock ticks */ 796 u16 ipath_jint_idle_ticks; /* idle clock ticks */
705 u16 ipath_jint_max_packets; /* max packets across all ports */ 797 u16 ipath_jint_max_packets; /* max packets across all ports */
798
799 /*
800 * lock for access to SerDes, and flags to sequence preset
801 * versus steady-state. 7220-only at the moment.
802 */
803 spinlock_t ipath_sdepb_lock;
 804 u8 ipath_presets_needed; /* set if presets are to be restored on next link DOWN */
706}; 805};
707 806
807/* ipath_hol_state values (stopping/starting user proc, send flushing) */
808#define IPATH_HOL_UP 0
809#define IPATH_HOL_DOWN 1
 810/* ipath_hol_next toggle values, used when hol_state is IPATH_HOL_DOWN */
811#define IPATH_HOL_DOWNSTOP 0
812#define IPATH_HOL_DOWNCONT 1
813
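For orientation (review commentary, not part of the patch): while the link is down, ipath_hol_state sits at IPATH_HOL_DOWN and ipath_hol_timer flips ipath_hol_next between the two sub-states, alternately stopping user sends and letting them continue so applications keep making forward progress. A minimal sketch of that toggle; the interval and the stop/continue bodies are assumptions, not the driver's actual logic:

static void example_hol_tick(struct ipath_devdata *dd)
{
	if (dd->ipath_hol_state != IPATH_HOL_DOWN)
		return;				/* link is up; nothing to toggle */
	if (dd->ipath_hol_next == IPATH_HOL_DOWNSTOP) {
		dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
		/* stop user send progress here */
	} else {
		dd->ipath_hol_next = IPATH_HOL_DOWNSTOP;
		/* briefly let user sends continue here */
	}
	mod_timer(&dd->ipath_hol_timer, jiffies + HZ / 8);	/* interval assumed */
}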
814/* bit positions for sdma_status */
815#define IPATH_SDMA_ABORTING 0
816#define IPATH_SDMA_DISARMED 1
817#define IPATH_SDMA_DISABLED 2
818#define IPATH_SDMA_LAYERBUF 3
819#define IPATH_SDMA_RUNNING 62
820#define IPATH_SDMA_SHUTDOWN 63
821
822/* bit combinations that correspond to abort states */
823#define IPATH_SDMA_ABORT_NONE 0
824#define IPATH_SDMA_ABORT_ABORTING (1UL << IPATH_SDMA_ABORTING)
825#define IPATH_SDMA_ABORT_DISARMED ((1UL << IPATH_SDMA_ABORTING) | \
826 (1UL << IPATH_SDMA_DISARMED))
827#define IPATH_SDMA_ABORT_DISABLED ((1UL << IPATH_SDMA_ABORTING) | \
828 (1UL << IPATH_SDMA_DISABLED))
829#define IPATH_SDMA_ABORT_ABORTED ((1UL << IPATH_SDMA_ABORTING) | \
830 (1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
 831#define IPATH_SDMA_ABORT_MASK ((1UL << IPATH_SDMA_ABORTING) | \
832 (1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
833
834#define IPATH_SDMA_BUF_NONE 0
 835#define IPATH_SDMA_BUF_MASK (1UL << IPATH_SDMA_LAYERBUF)
836
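The ABORT_* combinations above are meant to be compared against ipath_sdma_status as a three-bit group under ipath_sdma_lock, not tested one bit at a time. A hedged illustration; the helper name is ours, not the driver's:

static inline int example_sdma_abort_complete(u64 status)
{
	/* aborting, disarmed and disabled all observed: abort finished */
	return (status & IPATH_SDMA_ABORT_MASK) == IPATH_SDMA_ABORT_ABORTED;
}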
708/* Private data for file operations */ 837/* Private data for file operations */
709struct ipath_filedata { 838struct ipath_filedata {
710 struct ipath_portdata *pd; 839 struct ipath_portdata *pd;
711 unsigned subport; 840 unsigned subport;
712 unsigned tidcursor; 841 unsigned tidcursor;
842 struct ipath_user_sdma_queue *pq;
713}; 843};
714extern struct list_head ipath_dev_list; 844extern struct list_head ipath_dev_list;
715extern spinlock_t ipath_devs_lock; 845extern spinlock_t ipath_devs_lock;
@@ -718,7 +848,7 @@ extern struct ipath_devdata *ipath_lookup(int unit);
718int ipath_init_chip(struct ipath_devdata *, int); 848int ipath_init_chip(struct ipath_devdata *, int);
719int ipath_enable_wc(struct ipath_devdata *dd); 849int ipath_enable_wc(struct ipath_devdata *dd);
720void ipath_disable_wc(struct ipath_devdata *dd); 850void ipath_disable_wc(struct ipath_devdata *dd);
721int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp); 851int ipath_count_units(int *npresentp, int *nupp, int *maxportsp);
722void ipath_shutdown_device(struct ipath_devdata *); 852void ipath_shutdown_device(struct ipath_devdata *);
723void ipath_clear_freeze(struct ipath_devdata *); 853void ipath_clear_freeze(struct ipath_devdata *);
724 854
@@ -741,7 +871,8 @@ struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
741extern int ipath_diag_inuse; 871extern int ipath_diag_inuse;
742 872
743irqreturn_t ipath_intr(int irq, void *devid); 873irqreturn_t ipath_intr(int irq, void *devid);
744int ipath_decode_err(char *buf, size_t blen, ipath_err_t err); 874int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
875 ipath_err_t err);
745#if __IPATH_INFO || __IPATH_DBG 876#if __IPATH_INFO || __IPATH_DBG
746extern const char *ipath_ibcstatus_str[]; 877extern const char *ipath_ibcstatus_str[];
747#endif 878#endif
@@ -774,6 +905,13 @@ int ipath_set_lid(struct ipath_devdata *, u32, u8);
774int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv); 905int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
775void ipath_enable_armlaunch(struct ipath_devdata *); 906void ipath_enable_armlaunch(struct ipath_devdata *);
776void ipath_disable_armlaunch(struct ipath_devdata *); 907void ipath_disable_armlaunch(struct ipath_devdata *);
908void ipath_hol_down(struct ipath_devdata *);
909void ipath_hol_up(struct ipath_devdata *);
910void ipath_hol_event(unsigned long);
911void ipath_toggle_rclkrls(struct ipath_devdata *);
912void ipath_sd7220_clr_ibpar(struct ipath_devdata *);
913void ipath_set_relock_poll(struct ipath_devdata *, int);
914void ipath_shutdown_relock_poll(struct ipath_devdata *);
777 915
778/* for use in system calls, where we want to know device type, etc. */ 916/* for use in system calls, where we want to know device type, etc. */
779#define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd 917#define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd
@@ -781,11 +919,15 @@ void ipath_disable_armlaunch(struct ipath_devdata *);
781 ((struct ipath_filedata *)(fp)->private_data)->subport 919 ((struct ipath_filedata *)(fp)->private_data)->subport
782#define tidcursor_fp(fp) \ 920#define tidcursor_fp(fp) \
783 ((struct ipath_filedata *)(fp)->private_data)->tidcursor 921 ((struct ipath_filedata *)(fp)->private_data)->tidcursor
922#define user_sdma_queue_fp(fp) \
923 ((struct ipath_filedata *)(fp)->private_data)->pq
784 924
785/* 925/*
786 * values for ipath_flags 926 * values for ipath_flags
787 */ 927 */
788/* The chip is up and initted */ 928 /* chip can report link latency (IB 1.2) */
929#define IPATH_HAS_LINK_LATENCY 0x1
930 /* The chip is up and initted */
789#define IPATH_INITTED 0x2 931#define IPATH_INITTED 0x2
790 /* set if any user code has set kr_rcvhdrsize */ 932 /* set if any user code has set kr_rcvhdrsize */
791#define IPATH_RCVHDRSZ_SET 0x4 933#define IPATH_RCVHDRSZ_SET 0x4
@@ -809,6 +951,8 @@ void ipath_disable_armlaunch(struct ipath_devdata *);
809#define IPATH_LINKUNK 0x400 951#define IPATH_LINKUNK 0x400
810 /* Write combining flush needed for PIO */ 952 /* Write combining flush needed for PIO */
811#define IPATH_PIO_FLUSH_WC 0x1000 953#define IPATH_PIO_FLUSH_WC 0x1000
 954 /* no DMA'd receive tail pointer; derive tail from RHF sequence numbers */
955#define IPATH_NODMA_RTAIL 0x2000
812 /* no IB cable, or no device on IB cable */ 956 /* no IB cable, or no device on IB cable */
813#define IPATH_NOCABLE 0x4000 957#define IPATH_NOCABLE 0x4000
814 /* Supports port zero per packet receive interrupts via 958 /* Supports port zero per packet receive interrupts via
@@ -819,16 +963,26 @@ void ipath_disable_armlaunch(struct ipath_devdata *);
819 /* packet/word counters are 32 bit, else those 4 counters 963 /* packet/word counters are 32 bit, else those 4 counters
820 * are 64bit */ 964 * are 64bit */
821#define IPATH_32BITCOUNTERS 0x20000 965#define IPATH_32BITCOUNTERS 0x20000
822 /* can miss port0 rx interrupts */
823 /* Interrupt register is 64 bits */ 966 /* Interrupt register is 64 bits */
824#define IPATH_INTREG_64 0x40000 967#define IPATH_INTREG_64 0x40000
968 /* can miss port0 rx interrupts */
825#define IPATH_DISABLED 0x80000 /* administratively disabled */ 969#define IPATH_DISABLED 0x80000 /* administratively disabled */
826 /* Use GPIO interrupts for new counters */ 970 /* Use GPIO interrupts for new counters */
827#define IPATH_GPIO_ERRINTRS 0x100000 971#define IPATH_GPIO_ERRINTRS 0x100000
828#define IPATH_SWAP_PIOBUFS 0x200000 972#define IPATH_SWAP_PIOBUFS 0x200000
973 /* Supports Send DMA */
974#define IPATH_HAS_SEND_DMA 0x400000
975 /* Supports Send Count (not just word count) in PBC */
976#define IPATH_HAS_PBC_CNT 0x800000
829 /* Suppress heartbeat, even if turning off loopback */ 977 /* Suppress heartbeat, even if turning off loopback */
830#define IPATH_NO_HRTBT 0x1000000 978#define IPATH_NO_HRTBT 0x1000000
979#define IPATH_HAS_THRESH_UPDATE 0x4000000
831#define IPATH_HAS_MULT_IB_SPEED 0x8000000 980#define IPATH_HAS_MULT_IB_SPEED 0x8000000
981#define IPATH_IB_AUTONEG_INPROG 0x10000000
982#define IPATH_IB_AUTONEG_FAILED 0x20000000
 983 /* link intentionally disabled; do not attempt to bring it up */
984#define IPATH_IB_LINK_DISABLED 0x40000000
985#define IPATH_IB_FORCE_NOTIFY 0x80000000 /* force notify on next ib change */
832 986
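Like the existing ipath_flags bits, the new autoneg and link-control flags are tested with plain mask operations; a small sketch (the helper is hypothetical):

static inline int example_autoneg_in_progress(const struct ipath_devdata *dd)
{
	return !!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG);
}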
833/* Bits in GPIO for the added interrupts */ 987/* Bits in GPIO for the added interrupts */
834#define IPATH_GPIO_PORT0_BIT 2 988#define IPATH_GPIO_PORT0_BIT 2
@@ -847,13 +1001,18 @@ void ipath_disable_armlaunch(struct ipath_devdata *);
847 1001
848/* free up any allocated data at closes */ 1002/* free up any allocated data at closes */
849void ipath_free_data(struct ipath_portdata *dd); 1003void ipath_free_data(struct ipath_portdata *dd);
850u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *); 1004u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
1005void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
1006 unsigned len, int avail);
1007void ipath_init_iba7220_funcs(struct ipath_devdata *);
851void ipath_init_iba6120_funcs(struct ipath_devdata *); 1008void ipath_init_iba6120_funcs(struct ipath_devdata *);
852void ipath_init_iba6110_funcs(struct ipath_devdata *); 1009void ipath_init_iba6110_funcs(struct ipath_devdata *);
853void ipath_get_eeprom_info(struct ipath_devdata *); 1010void ipath_get_eeprom_info(struct ipath_devdata *);
854int ipath_update_eeprom_log(struct ipath_devdata *dd); 1011int ipath_update_eeprom_log(struct ipath_devdata *dd);
855void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr); 1012void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
856u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); 1013u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
1014void ipath_disarm_senderrbufs(struct ipath_devdata *, int);
1015void ipath_force_pio_avail_update(struct ipath_devdata *);
857void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev); 1016void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
858 1017
859/* 1018/*
@@ -865,6 +1024,34 @@ void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
865#define IPATH_LED_LOG 2 /* Logical (link) YELLOW LED */ 1024#define IPATH_LED_LOG 2 /* Logical (link) YELLOW LED */
866void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val); 1025void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
867 1026
1027/* send dma routines */
1028int setup_sdma(struct ipath_devdata *);
1029void teardown_sdma(struct ipath_devdata *);
1030void ipath_restart_sdma(struct ipath_devdata *);
1031void ipath_sdma_intr(struct ipath_devdata *);
1032int ipath_sdma_verbs_send(struct ipath_devdata *, struct ipath_sge_state *,
1033 u32, struct ipath_verbs_txreq *);
 1034/* ipath_sdma_lock must be held before calling this. */
1035int ipath_sdma_make_progress(struct ipath_devdata *dd);
1036
1037/* must be called under ipath_sdma_lock */
1038static inline u16 ipath_sdma_descq_freecnt(const struct ipath_devdata *dd)
1039{
1040 return dd->ipath_sdma_descq_cnt -
1041 (dd->ipath_sdma_descq_added - dd->ipath_sdma_descq_removed) -
1042 1 - dd->ipath_sdma_desc_nreserved;
1043}
1044
1045static inline void ipath_sdma_desc_reserve(struct ipath_devdata *dd, u16 cnt)
1046{
1047 dd->ipath_sdma_desc_nreserved += cnt;
1048}
1049
1050static inline void ipath_sdma_desc_unreserve(struct ipath_devdata *dd, u16 cnt)
1051{
1052 dd->ipath_sdma_desc_nreserved -= cnt;
1053}
1054
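A sketch of how a caller is expected to combine the helpers above under the required lock; everything other than the helpers themselves is hypothetical. Note that the free count already subtracts one slot (to distinguish a full ring from an empty one) plus any reserved descriptors:

static int example_reserve_sdma_descs(struct ipath_devdata *dd, u16 ndesc)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	if (ipath_sdma_descq_freecnt(dd) < ndesc)
		ret = -EBUSY;		/* descriptor ring too full right now */
	else
		ipath_sdma_desc_reserve(dd, ndesc);
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	return ret;
}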
868/* 1055/*
869 * number of words used for protocol header if not set by ipath_userinit(); 1056 * number of words used for protocol header if not set by ipath_userinit();
870 */ 1057 */
@@ -875,6 +1062,8 @@ void ipath_release_user_pages(struct page **, size_t);
875void ipath_release_user_pages_on_close(struct page **, size_t); 1062void ipath_release_user_pages_on_close(struct page **, size_t);
876int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int); 1063int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
877int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int); 1064int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);
1065int ipath_tempsense_read(struct ipath_devdata *, u8 regnum);
1066int ipath_tempsense_write(struct ipath_devdata *, u8 regnum, u8 data);
878 1067
879/* these are used for the registers that vary with port */ 1068/* these are used for the registers that vary with port */
880void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg, 1069void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
@@ -891,8 +1080,7 @@ void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
891 1080
892/* 1081/*
893 * At the moment, none of the s-registers are writable, so no 1082 * At the moment, none of the s-registers are writable, so no
894 * ipath_write_sreg(), and none of the c-registers are writable, so no 1083 * ipath_write_sreg().
895 * ipath_write_creg().
896 */ 1084 */
897 1085
898/** 1086/**
@@ -1001,6 +1189,27 @@ static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
1001 pd->port_rcvhdrtail_kvaddr)); 1189 pd->port_rcvhdrtail_kvaddr));
1002} 1190}
1003 1191
1192static inline u32 ipath_get_hdrqtail(const struct ipath_portdata *pd)
1193{
1194 const struct ipath_devdata *dd = pd->port_dd;
1195 u32 hdrqtail;
1196
1197 if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
1198 __le32 *rhf_addr;
1199 u32 seq;
1200
1201 rhf_addr = (__le32 *) pd->port_rcvhdrq +
1202 pd->port_head + dd->ipath_rhf_offset;
1203 seq = ipath_hdrget_seq(rhf_addr);
1204 hdrqtail = pd->port_head;
1205 if (seq == pd->port_seq_cnt)
1206 hdrqtail++;
1207 } else
1208 hdrqtail = ipath_get_rcvhdrtail(pd);
1209
1210 return hdrqtail;
1211}
1212
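A sketch of the receive-poll idiom this helper enables: on IPATH_NODMA_RTAIL chips the tail is derived one entry at a time from the RHF sequence number, so callers simply loop until head catches up. The per-entry routine below is hypothetical and assumed to advance port_head and port_seq_cnt:

static void example_drain_hdrq(struct ipath_portdata *pd)
{
	while (pd->port_head != ipath_get_hdrqtail(pd))
		example_process_one_rcvhdrq_entry(pd);	/* hypothetical */
}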
1004static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r) 1213static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
1005{ 1214{
1006 return (dd->ipath_flags & IPATH_INTREG_64) ? 1215 return (dd->ipath_flags & IPATH_INTREG_64) ?
@@ -1029,6 +1238,21 @@ static inline u32 ipath_ib_linktrstate(struct ipath_devdata *dd, u64 ibcs)
1029} 1238}
1030 1239
1031/* 1240/*
 1241 * From the contents of IBCStatus (or a saved copy), return the logical
 1242 * link state: a combination of link state and link training state
 1243 * (down, active, init, arm, etc.)
1244 */
1245static inline u32 ipath_ib_state(struct ipath_devdata *dd, u64 ibcs)
1246{
1247 u32 ibs;
1248 ibs = (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1249 dd->ibcs_lts_mask;
1250 ibs |= (u32)(ibcs &
1251 (INFINIPATH_IBCS_LINKSTATE_MASK << dd->ibcs_ls_shift));
1252 return ibs;
1253}
1254
1255/*
1032 * sysfs interface. 1256 * sysfs interface.
1033 */ 1257 */
1034 1258
@@ -1053,6 +1277,7 @@ int ipathfs_remove_device(struct ipath_devdata *);
1053dma_addr_t ipath_map_page(struct pci_dev *, struct page *, unsigned long, 1277dma_addr_t ipath_map_page(struct pci_dev *, struct page *, unsigned long,
1054 size_t, int); 1278 size_t, int);
1055dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int); 1279dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
1280const char *ipath_get_unit_name(int unit);
1056 1281
1057/* 1282/*
1058 * Flush write combining store buffers (if present) and perform a write 1283 * Flush write combining store buffers (if present) and perform a write
@@ -1065,11 +1290,8 @@ dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
1065#endif 1290#endif
1066 1291
1067extern unsigned ipath_debug; /* debugging bit mask */ 1292extern unsigned ipath_debug; /* debugging bit mask */
1068 1293extern unsigned ipath_linkrecovery;
1069#define IPATH_MAX_PARITY_ATTEMPTS 10000 /* max times to try recovery */ 1294extern unsigned ipath_mtu4096;
1070
1071const char *ipath_get_unit_name(int unit);
1072
1073extern struct mutex ipath_mutex; 1295extern struct mutex ipath_mutex;
1074 1296
1075#define IPATH_DRV_NAME "ib_ipath" 1297#define IPATH_DRV_NAME "ib_ipath"
@@ -1096,7 +1318,7 @@ extern struct mutex ipath_mutex;
1096 1318
1097# define __IPATH_DBG_WHICH(which,fmt,...) \ 1319# define __IPATH_DBG_WHICH(which,fmt,...) \
1098 do { \ 1320 do { \
1099 if(unlikely(ipath_debug&(which))) \ 1321 if (unlikely(ipath_debug & (which))) \
1100 printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \ 1322 printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \
1101 __func__,##__VA_ARGS__); \ 1323 __func__,##__VA_ARGS__); \
1102 } while(0) 1324 } while(0)
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index b34b91d3723a..1ff46ae7dd99 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -146,6 +146,15 @@ static int recv_subn_get_guidinfo(struct ib_smp *smp,
146 return reply(smp); 146 return reply(smp);
147} 147}
148 148
149static void set_link_width_enabled(struct ipath_devdata *dd, u32 w)
150{
151 (void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w);
152}
153
154static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s)
155{
156 (void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s);
157}
149 158
150static int get_overrunthreshold(struct ipath_devdata *dd) 159static int get_overrunthreshold(struct ipath_devdata *dd)
151{ 160{
@@ -226,6 +235,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
226 struct ib_device *ibdev, u8 port) 235 struct ib_device *ibdev, u8 port)
227{ 236{
228 struct ipath_ibdev *dev; 237 struct ipath_ibdev *dev;
238 struct ipath_devdata *dd;
229 struct ib_port_info *pip = (struct ib_port_info *)smp->data; 239 struct ib_port_info *pip = (struct ib_port_info *)smp->data;
230 u16 lid; 240 u16 lid;
231 u8 ibcstat; 241 u8 ibcstat;
@@ -239,6 +249,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
239 } 249 }
240 250
241 dev = to_idev(ibdev); 251 dev = to_idev(ibdev);
252 dd = dev->dd;
242 253
243 /* Clear all fields. Only set the non-zero fields. */ 254 /* Clear all fields. Only set the non-zero fields. */
244 memset(smp->data, 0, sizeof(smp->data)); 255 memset(smp->data, 0, sizeof(smp->data));
@@ -248,25 +259,28 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
248 dev->mkeyprot == 0) 259 dev->mkeyprot == 0)
249 pip->mkey = dev->mkey; 260 pip->mkey = dev->mkey;
250 pip->gid_prefix = dev->gid_prefix; 261 pip->gid_prefix = dev->gid_prefix;
251 lid = dev->dd->ipath_lid; 262 lid = dd->ipath_lid;
252 pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE; 263 pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
253 pip->sm_lid = cpu_to_be16(dev->sm_lid); 264 pip->sm_lid = cpu_to_be16(dev->sm_lid);
254 pip->cap_mask = cpu_to_be32(dev->port_cap_flags); 265 pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
255 /* pip->diag_code; */ 266 /* pip->diag_code; */
256 pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period); 267 pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
257 pip->local_port_num = port; 268 pip->local_port_num = port;
258 pip->link_width_enabled = dev->link_width_enabled; 269 pip->link_width_enabled = dd->ipath_link_width_enabled;
259 pip->link_width_supported = 3; /* 1x or 4x */ 270 pip->link_width_supported = dd->ipath_link_width_supported;
260 pip->link_width_active = 2; /* 4x */ 271 pip->link_width_active = dd->ipath_link_width_active;
261 pip->linkspeed_portstate = 0x10; /* 2.5Gbps */ 272 pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4;
262 ibcstat = dev->dd->ipath_lastibcstat; 273 ibcstat = dd->ipath_lastibcstat;
263 pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1; 274 /* map LinkState to IB portinfo values. */
275 pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1;
276
264 pip->portphysstate_linkdown = 277 pip->portphysstate_linkdown =
265 (ipath_cvt_physportstate[ibcstat & 0xf] << 4) | 278 (ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) |
266 (get_linkdowndefaultstate(dev->dd) ? 1 : 2); 279 (get_linkdowndefaultstate(dd) ? 1 : 2);
267 pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dev->dd->ipath_lmc; 280 pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc;
268 pip->linkspeedactive_enabled = 0x11; /* 2.5Gbps, 2.5Gbps */ 281 pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) |
269 switch (dev->dd->ipath_ibmtu) { 282 dd->ipath_link_speed_enabled;
283 switch (dd->ipath_ibmtu) {
270 case 4096: 284 case 4096:
271 mtu = IB_MTU_4096; 285 mtu = IB_MTU_4096;
272 break; 286 break;
@@ -292,19 +306,15 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
292 /* pip->vl_arb_high_cap; // only one VL */ 306 /* pip->vl_arb_high_cap; // only one VL */
293 /* pip->vl_arb_low_cap; // only one VL */ 307 /* pip->vl_arb_low_cap; // only one VL */
294 /* InitTypeReply = 0 */ 308 /* InitTypeReply = 0 */
 295 /* 309 /* our MTU cap depends on whether the 4K MTU is enabled */
296 * Note: the chips support a maximum MTU of 4096, but the driver 310 pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
297 * hasn't implemented this feature yet, so set the maximum value 311 /* HCAs ignore VLStallCount and HOQLife */
298 * to 2048.
299 */
300 pip->inittypereply_mtucap = IB_MTU_2048;
301 // HCAs ignore VLStallCount and HOQLife
302 /* pip->vlstallcnt_hoqlife; */ 312 /* pip->vlstallcnt_hoqlife; */
303 pip->operationalvl_pei_peo_fpi_fpo = 0x10; /* OVLs = 1 */ 313 pip->operationalvl_pei_peo_fpi_fpo = 0x10; /* OVLs = 1 */
304 pip->mkey_violations = cpu_to_be16(dev->mkey_violations); 314 pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
305 /* P_KeyViolations are counted by hardware. */ 315 /* P_KeyViolations are counted by hardware. */
306 pip->pkey_violations = 316 pip->pkey_violations =
307 cpu_to_be16((ipath_get_cr_errpkey(dev->dd) - 317 cpu_to_be16((ipath_get_cr_errpkey(dd) -
308 dev->z_pkey_violations) & 0xFFFF); 318 dev->z_pkey_violations) & 0xFFFF);
309 pip->qkey_violations = cpu_to_be16(dev->qkey_violations); 319 pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
310 /* Only the hardware GUID is supported for now */ 320 /* Only the hardware GUID is supported for now */
@@ -313,10 +323,17 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
313 /* 32.768 usec. response time (guessing) */ 323 /* 32.768 usec. response time (guessing) */
314 pip->resv_resptimevalue = 3; 324 pip->resv_resptimevalue = 3;
315 pip->localphyerrors_overrunerrors = 325 pip->localphyerrors_overrunerrors =
316 (get_phyerrthreshold(dev->dd) << 4) | 326 (get_phyerrthreshold(dd) << 4) |
317 get_overrunthreshold(dev->dd); 327 get_overrunthreshold(dd);
318 /* pip->max_credit_hint; */ 328 /* pip->max_credit_hint; */
319 /* pip->link_roundtrip_latency[3]; */ 329 if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
330 u32 v;
331
332 v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY);
333 pip->link_roundtrip_latency[0] = v >> 16;
334 pip->link_roundtrip_latency[1] = v >> 8;
335 pip->link_roundtrip_latency[2] = v;
336 }
320 337
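The 24-bit LinkRoundTripLatency field is packed big-endian, one byte per array element; a consumer would reassemble it as in this illustrative fragment:

	u32 latency = (pip->link_roundtrip_latency[0] << 16) |
		      (pip->link_roundtrip_latency[1] << 8) |
		       pip->link_roundtrip_latency[2];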
321 ret = reply(smp); 338 ret = reply(smp);
322 339
@@ -444,19 +461,25 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
444 ib_dispatch_event(&event); 461 ib_dispatch_event(&event);
445 } 462 }
446 463
447 /* Only 4x supported but allow 1x or 4x to be set (see 14.2.6.6). */ 464 /* Allow 1x or 4x to be set (see 14.2.6.6). */
448 lwe = pip->link_width_enabled; 465 lwe = pip->link_width_enabled;
449 if ((lwe >= 4 && lwe <= 8) || (lwe >= 0xC && lwe <= 0xFE)) 466 if (lwe) {
450 goto err; 467 if (lwe == 0xFF)
451 if (lwe == 0xFF) 468 lwe = dd->ipath_link_width_supported;
452 dev->link_width_enabled = 3; /* 1x or 4x */ 469 else if (lwe >= 16 || (lwe & ~dd->ipath_link_width_supported))
453 else if (lwe) 470 goto err;
454 dev->link_width_enabled = lwe; 471 set_link_width_enabled(dd, lwe);
472 }
455 473
 456 /* Only 2.5 Gbs supported. */ 474 /* Allow 2.5 or 5.0 Gb/s. */
457 lse = pip->linkspeedactive_enabled & 0xF; 475 lse = pip->linkspeedactive_enabled & 0xF;
458 if (lse >= 2 && lse <= 0xE) 476 if (lse) {
459 goto err; 477 if (lse == 15)
478 lse = dd->ipath_link_speed_supported;
479 else if (lse >= 8 || (lse & ~dd->ipath_link_speed_supported))
480 goto err;
481 set_link_speed_enabled(dd, lse);
482 }
460 483
461 /* Set link down default state. */ 484 /* Set link down default state. */
462 switch (pip->portphysstate_linkdown & 0xF) { 485 switch (pip->portphysstate_linkdown & 0xF) {
@@ -491,6 +514,8 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
491 mtu = 2048; 514 mtu = 2048;
492 break; 515 break;
493 case IB_MTU_4096: 516 case IB_MTU_4096:
517 if (!ipath_mtu4096)
518 goto err;
494 mtu = 4096; 519 mtu = 4096;
495 break; 520 break;
496 default: 521 default:
@@ -565,6 +590,10 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
565 else 590 else
566 goto err; 591 goto err;
567 ipath_set_linkstate(dd, lstate); 592 ipath_set_linkstate(dd, lstate);
593 if (lstate == IPATH_IB_LINKDOWN_DISABLE) {
594 ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
595 goto done;
596 }
568 ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED | 597 ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
569 IPATH_LINKACTIVE, 1000); 598 IPATH_LINKACTIVE, 1000);
570 break; 599 break;
@@ -948,10 +977,14 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
948 * nsec. 0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec. Sample 977 * nsec. 0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec. Sample
949 * intervals are counted in ticks. Since we use Linux timers, that 978 * intervals are counted in ticks. Since we use Linux timers, that
950 * count in jiffies, we can't sample for less than 1000 ticks if HZ 979 * count in jiffies, we can't sample for less than 1000 ticks if HZ
 951 * == 1000 (4000 ticks if HZ is 250). 980 * == 1000 (4000 ticks if HZ is 250). link_speed_active returns
 981 * 2 for DDR and 1 for SDR, so set the tick to 1 for DDR and 0 for
 982 * SDR on chips that have hardware support for delaying packets.
952 */ 983 */
953 /* XXX This is WRONG. */ 984 if (crp->cr_psstat)
954 p->tick = 250; /* 1 usec. */ 985 p->tick = dev->dd->ipath_link_speed_active - 1;
986 else
987 p->tick = 250; /* 1 usec. */
955 p->counter_width = 4; /* 32 bit counters */ 988 p->counter_width = 4; /* 32 bit counters */
956 p->counter_mask0_9 = COUNTER_MASK0_9; 989 p->counter_mask0_9 = COUNTER_MASK0_9;
957 spin_lock_irqsave(&dev->pending_lock, flags); 990 spin_lock_irqsave(&dev->pending_lock, flags);
@@ -1364,7 +1397,8 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
1364 } 1397 }
1365 1398
1366 /* Is the mkey in the process of expiring? */ 1399 /* Is the mkey in the process of expiring? */
1367 if (dev->mkey_lease_timeout && jiffies >= dev->mkey_lease_timeout) { 1400 if (dev->mkey_lease_timeout &&
1401 time_after_eq(jiffies, dev->mkey_lease_timeout)) {
1368 /* Clear timeout and mkey protection field. */ 1402 /* Clear timeout and mkey protection field. */
1369 dev->mkey_lease_timeout = 0; 1403 dev->mkey_lease_timeout = 0;
1370 dev->mkeyprot = 0; 1404 dev->mkeyprot = 0;
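The switch from a raw >= comparison on jiffies to time_after_eq() matters once the jiffies counter wraps: the macro compares the signed difference, which stays correct across overflow. A self-contained sketch of the difference:

#include <linux/jiffies.h>

static int example_lease_expired(unsigned long lease_timeout)
{
	/* wrong near wraparound: jiffies >= lease_timeout */
	/* right: signed difference, see include/linux/jiffies.h */
	return time_after_eq(jiffies, lease_timeout);
}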
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 087ed3166479..dd5b6e9d57c2 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -340,6 +340,7 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
340 qp->s_flags &= IPATH_S_SIGNAL_REQ_WR; 340 qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
341 qp->s_hdrwords = 0; 341 qp->s_hdrwords = 0;
342 qp->s_wqe = NULL; 342 qp->s_wqe = NULL;
343 qp->s_pkt_delay = 0;
343 qp->s_psn = 0; 344 qp->s_psn = 0;
344 qp->r_psn = 0; 345 qp->r_psn = 0;
345 qp->r_msn = 0; 346 qp->r_msn = 0;
@@ -392,7 +393,6 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
392 qp->ibqp.qp_num, qp->remote_qpn, err); 393 qp->ibqp.qp_num, qp->remote_qpn, err);
393 394
394 spin_lock(&dev->pending_lock); 395 spin_lock(&dev->pending_lock);
395 /* XXX What if its already removed by the timeout code? */
396 if (!list_empty(&qp->timerwait)) 396 if (!list_empty(&qp->timerwait))
397 list_del_init(&qp->timerwait); 397 list_del_init(&qp->timerwait);
398 if (!list_empty(&qp->piowait)) 398 if (!list_empty(&qp->piowait))
@@ -516,13 +516,13 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
516 goto inval; 516 goto inval;
517 517
518 /* 518 /*
 519 * Note: the chips support a maximum MTU of 4096, but the driver 519 * don't allow invalid Path MTU values, or values greater than
 520 * hasn't implemented this feature yet, so don't allow Path MTU 520 * 2048 unless we are configured for a 4KB MTU
521 * values greater than 2048.
522 */ 521 */
523 if (attr_mask & IB_QP_PATH_MTU) 522 if ((attr_mask & IB_QP_PATH_MTU) &&
524 if (attr->path_mtu > IB_MTU_2048) 523 (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
525 goto inval; 524 (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
525 goto inval;
526 526
527 if (attr_mask & IB_QP_PATH_MIG_STATE) 527 if (attr_mask & IB_QP_PATH_MIG_STATE)
528 if (attr->path_mig_state != IB_MIG_MIGRATED && 528 if (attr->path_mig_state != IB_MIG_MIGRATED &&
@@ -564,8 +564,10 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
564 if (attr_mask & IB_QP_ACCESS_FLAGS) 564 if (attr_mask & IB_QP_ACCESS_FLAGS)
565 qp->qp_access_flags = attr->qp_access_flags; 565 qp->qp_access_flags = attr->qp_access_flags;
566 566
567 if (attr_mask & IB_QP_AV) 567 if (attr_mask & IB_QP_AV) {
568 qp->remote_ah_attr = attr->ah_attr; 568 qp->remote_ah_attr = attr->ah_attr;
569 qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
570 }
569 571
570 if (attr_mask & IB_QP_PATH_MTU) 572 if (attr_mask & IB_QP_PATH_MTU)
571 qp->path_mtu = attr->path_mtu; 573 qp->path_mtu = attr->path_mtu;
@@ -748,22 +750,33 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
748 size_t sz; 750 size_t sz;
749 struct ib_qp *ret; 751 struct ib_qp *ret;
750 752
751 if (init_attr->cap.max_send_sge > ib_ipath_max_sges || 753 if (init_attr->create_flags) {
752 init_attr->cap.max_recv_sge > ib_ipath_max_sges || 754 ret = ERR_PTR(-EINVAL);
753 init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
754 init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
755 ret = ERR_PTR(-ENOMEM);
756 goto bail; 755 goto bail;
757 } 756 }
758 757
759 if (init_attr->cap.max_send_sge + 758 if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
760 init_attr->cap.max_recv_sge + 759 init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
761 init_attr->cap.max_send_wr +
762 init_attr->cap.max_recv_wr == 0) {
763 ret = ERR_PTR(-EINVAL); 760 ret = ERR_PTR(-EINVAL);
764 goto bail; 761 goto bail;
765 } 762 }
766 763
764 /* Check receive queue parameters if no SRQ is specified. */
765 if (!init_attr->srq) {
766 if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
767 init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
768 ret = ERR_PTR(-EINVAL);
769 goto bail;
770 }
771 if (init_attr->cap.max_send_sge +
772 init_attr->cap.max_send_wr +
773 init_attr->cap.max_recv_sge +
774 init_attr->cap.max_recv_wr == 0) {
775 ret = ERR_PTR(-EINVAL);
776 goto bail;
777 }
778 }
779
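With this restructuring, receive-queue limits are validated only when the QP owns its own receive queue; a QP attached to an SRQ inherits them from the SRQ, so those checks are skipped. Illustrative init_attr (the values and SRQ pointer are hypothetical):

	struct ib_qp_init_attr attr = {
		.qp_type	= IB_QPT_RC,
		.srq		= my_srq,	/* receive side comes from the SRQ */
		.cap		= {
			.max_send_wr	= 64,	/* still checked against module limits */
			.max_send_sge	= 4,
		},
	};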
767 switch (init_attr->qp_type) { 780 switch (init_attr->qp_type) {
768 case IB_QPT_UC: 781 case IB_QPT_UC:
769 case IB_QPT_RC: 782 case IB_QPT_RC:
@@ -840,6 +853,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
840 goto bail_qp; 853 goto bail_qp;
841 } 854 }
842 qp->ip = NULL; 855 qp->ip = NULL;
856 qp->s_tx = NULL;
843 ipath_reset_qp(qp, init_attr->qp_type); 857 ipath_reset_qp(qp, init_attr->qp_type);
844 break; 858 break;
845 859
@@ -945,12 +959,20 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
945 /* Stop the sending tasklet. */ 959 /* Stop the sending tasklet. */
946 tasklet_kill(&qp->s_task); 960 tasklet_kill(&qp->s_task);
947 961
962 if (qp->s_tx) {
963 atomic_dec(&qp->refcount);
964 if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
965 kfree(qp->s_tx->txreq.map_addr);
966 }
967
948 /* Make sure the QP isn't on the timeout list. */ 968 /* Make sure the QP isn't on the timeout list. */
949 spin_lock_irqsave(&dev->pending_lock, flags); 969 spin_lock_irqsave(&dev->pending_lock, flags);
950 if (!list_empty(&qp->timerwait)) 970 if (!list_empty(&qp->timerwait))
951 list_del_init(&qp->timerwait); 971 list_del_init(&qp->timerwait);
952 if (!list_empty(&qp->piowait)) 972 if (!list_empty(&qp->piowait))
953 list_del_init(&qp->piowait); 973 list_del_init(&qp->piowait);
974 if (qp->s_tx)
975 list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
954 spin_unlock_irqrestore(&dev->pending_lock, flags); 976 spin_unlock_irqrestore(&dev->pending_lock, flags);
955 977
956 /* 978 /*
@@ -1021,7 +1043,6 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
1021 qp->ibqp.qp_num, qp->remote_qpn, wc->status); 1043 qp->ibqp.qp_num, qp->remote_qpn, wc->status);
1022 1044
1023 spin_lock(&dev->pending_lock); 1045 spin_lock(&dev->pending_lock);
1024 /* XXX What if its already removed by the timeout code? */
1025 if (!list_empty(&qp->timerwait)) 1046 if (!list_empty(&qp->timerwait))
1026 list_del_init(&qp->timerwait); 1047 list_del_init(&qp->timerwait);
1027 if (!list_empty(&qp->piowait)) 1048 if (!list_empty(&qp->piowait))
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 40f3e37d7adc..c405dfba5531 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -31,6 +31,8 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/io.h>
35
34#include "ipath_verbs.h" 36#include "ipath_verbs.h"
35#include "ipath_kernel.h" 37#include "ipath_kernel.h"
36 38
@@ -306,7 +308,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
306 else { 308 else {
307 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); 309 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
308 /* Immediate data comes after the BTH */ 310 /* Immediate data comes after the BTH */
309 ohdr->u.imm_data = wqe->wr.imm_data; 311 ohdr->u.imm_data = wqe->wr.ex.imm_data;
310 hwords += 1; 312 hwords += 1;
311 } 313 }
312 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 314 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -344,7 +346,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
344 qp->s_state = 346 qp->s_state =
345 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); 347 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
346 /* Immediate data comes after RETH */ 348 /* Immediate data comes after RETH */
347 ohdr->u.rc.imm_data = wqe->wr.imm_data; 349 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
348 hwords += 1; 350 hwords += 1;
349 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 351 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
350 bth0 |= 1 << 23; 352 bth0 |= 1 << 23;
@@ -488,7 +490,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
488 else { 490 else {
489 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); 491 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
490 /* Immediate data comes after the BTH */ 492 /* Immediate data comes after the BTH */
491 ohdr->u.imm_data = wqe->wr.imm_data; 493 ohdr->u.imm_data = wqe->wr.ex.imm_data;
492 hwords += 1; 494 hwords += 1;
493 } 495 }
494 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 496 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -524,7 +526,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
524 else { 526 else {
525 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); 527 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
526 /* Immediate data comes after the BTH */ 528 /* Immediate data comes after the BTH */
527 ohdr->u.imm_data = wqe->wr.imm_data; 529 ohdr->u.imm_data = wqe->wr.ex.imm_data;
528 hwords += 1; 530 hwords += 1;
529 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 531 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
530 bth0 |= 1 << 23; 532 bth0 |= 1 << 23;
@@ -585,19 +587,39 @@ bail:
585static void send_rc_ack(struct ipath_qp *qp) 587static void send_rc_ack(struct ipath_qp *qp)
586{ 588{
587 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); 589 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
590 struct ipath_devdata *dd;
588 u16 lrh0; 591 u16 lrh0;
589 u32 bth0; 592 u32 bth0;
590 u32 hwords; 593 u32 hwords;
594 u32 __iomem *piobuf;
591 struct ipath_ib_header hdr; 595 struct ipath_ib_header hdr;
592 struct ipath_other_headers *ohdr; 596 struct ipath_other_headers *ohdr;
593 unsigned long flags; 597 unsigned long flags;
594 598
599 spin_lock_irqsave(&qp->s_lock, flags);
600
595 /* Don't send ACK or NAK if a RDMA read or atomic is pending. */ 601 /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
596 if (qp->r_head_ack_queue != qp->s_tail_ack_queue || 602 if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
597 (qp->s_flags & IPATH_S_ACK_PENDING) || 603 (qp->s_flags & IPATH_S_ACK_PENDING) ||
598 qp->s_ack_state != OP(ACKNOWLEDGE)) 604 qp->s_ack_state != OP(ACKNOWLEDGE))
599 goto queue_ack; 605 goto queue_ack;
600 606
607 spin_unlock_irqrestore(&qp->s_lock, flags);
608
609 dd = dev->dd;
610 piobuf = ipath_getpiobuf(dd, 0, NULL);
611 if (!piobuf) {
612 /*
613 * We are out of PIO buffers at the moment.
614 * Pass responsibility for sending the ACK to the
615 * send tasklet so that when a PIO buffer becomes
616 * available, the ACK is sent ahead of other outgoing
617 * packets.
618 */
619 spin_lock_irqsave(&qp->s_lock, flags);
620 goto queue_ack;
621 }
622
601 /* Construct the header. */ 623 /* Construct the header. */
602 ohdr = &hdr.u.oth; 624 ohdr = &hdr.u.oth;
603 lrh0 = IPATH_LRH_BTH; 625 lrh0 = IPATH_LRH_BTH;
@@ -611,7 +633,7 @@ static void send_rc_ack(struct ipath_qp *qp)
611 lrh0 = IPATH_LRH_GRH; 633 lrh0 = IPATH_LRH_GRH;
612 } 634 }
613 /* read pkey_index w/o lock (its atomic) */ 635 /* read pkey_index w/o lock (its atomic) */
614 bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index) | 636 bth0 = ipath_get_pkey(dd, qp->s_pkey_index) |
615 (OP(ACKNOWLEDGE) << 24) | (1 << 22); 637 (OP(ACKNOWLEDGE) << 24) | (1 << 22);
616 if (qp->r_nak_state) 638 if (qp->r_nak_state)
617 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | 639 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
@@ -623,30 +645,29 @@ static void send_rc_ack(struct ipath_qp *qp)
623 hdr.lrh[0] = cpu_to_be16(lrh0); 645 hdr.lrh[0] = cpu_to_be16(lrh0);
624 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); 646 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
625 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC); 647 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
626 hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid); 648 hdr.lrh[3] = cpu_to_be16(dd->ipath_lid);
627 ohdr->bth[0] = cpu_to_be32(bth0); 649 ohdr->bth[0] = cpu_to_be32(bth0);
628 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); 650 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
629 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK); 651 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
630 652
631 /* 653 writeq(hwords + 1, piobuf);
632 * If we can send the ACK, clear the ACK state.
633 */
634 if (ipath_verbs_send(qp, &hdr, hwords, NULL, 0) == 0) {
635 dev->n_unicast_xmit++;
636 goto done;
637 }
638 654
639 /* 655 if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
640 * We are out of PIO buffers at the moment. 656 u32 *hdrp = (u32 *) &hdr;
641 * Pass responsibility for sending the ACK to the 657
642 * send tasklet so that when a PIO buffer becomes 658 ipath_flush_wc();
643 * available, the ACK is sent ahead of other outgoing 659 __iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
644 * packets. 660 ipath_flush_wc();
645 */ 661 __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
646 dev->n_rc_qacks++; 662 } else
663 __iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);
664
665 ipath_flush_wc();
666
667 dev->n_unicast_xmit++;
668 goto done;
647 669
648queue_ack: 670queue_ack:
649 spin_lock_irqsave(&qp->s_lock, flags);
650 dev->n_rc_qacks++; 671 dev->n_rc_qacks++;
651 qp->s_flags |= IPATH_S_ACK_PENDING; 672 qp->s_flags |= IPATH_S_ACK_PENDING;
652 qp->s_nak_state = qp->r_nak_state; 673 qp->s_nak_state = qp->r_nak_state;
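The fast path above writes the ACK directly into a PIO buffer, and on write-combining mappings the store order is the whole point: PBC first, then all but the last header dword, then a flush so the final dword is observed last, since that store triggers the packet launch. A condensed restatement of that contract, with piobuf and hdrp as in the function (this mirrors the code, it is not a replacement for it):

	writeq(hwords + 1, piobuf);		/* 1: PBC word, length in dwords */
	__iowrite32_copy(piobuf + 2, hdrp, hwords - 1);	/* 2: all but last dword */
	ipath_flush_wc();			/* 3: drain WC store buffers */
	__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);	/* 4: launch */

Without IPATH_PIO_FLUSH_WC set, the copy can go out in one shot with a single flush afterwards.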
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
index 92ad73a7fff0..8f44d0cf3833 100644
--- a/drivers/infiniband/hw/ipath/ipath_registers.h
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -63,67 +63,92 @@
63/* kr_control bits */ 63/* kr_control bits */
64#define INFINIPATH_C_FREEZEMODE 0x00000002 64#define INFINIPATH_C_FREEZEMODE 0x00000002
65#define INFINIPATH_C_LINKENABLE 0x00000004 65#define INFINIPATH_C_LINKENABLE 0x00000004
66#define INFINIPATH_C_RESET 0x00000001
67 66
68/* kr_sendctrl bits */ 67/* kr_sendctrl bits */
69#define INFINIPATH_S_DISARMPIOBUF_SHIFT 16 68#define INFINIPATH_S_DISARMPIOBUF_SHIFT 16
69#define INFINIPATH_S_UPDTHRESH_SHIFT 24
70#define INFINIPATH_S_UPDTHRESH_MASK 0x1f
70 71
71#define IPATH_S_ABORT 0 72#define IPATH_S_ABORT 0
72#define IPATH_S_PIOINTBUFAVAIL 1 73#define IPATH_S_PIOINTBUFAVAIL 1
73#define IPATH_S_PIOBUFAVAILUPD 2 74#define IPATH_S_PIOBUFAVAILUPD 2
74#define IPATH_S_PIOENABLE 3 75#define IPATH_S_PIOENABLE 3
76#define IPATH_S_SDMAINTENABLE 9
77#define IPATH_S_SDMASINGLEDESCRIPTOR 10
78#define IPATH_S_SDMAENABLE 11
79#define IPATH_S_SDMAHALT 12
75#define IPATH_S_DISARM 31 80#define IPATH_S_DISARM 31
76 81
77#define INFINIPATH_S_ABORT (1U << IPATH_S_ABORT) 82#define INFINIPATH_S_ABORT (1U << IPATH_S_ABORT)
78#define INFINIPATH_S_PIOINTBUFAVAIL (1U << IPATH_S_PIOINTBUFAVAIL) 83#define INFINIPATH_S_PIOINTBUFAVAIL (1U << IPATH_S_PIOINTBUFAVAIL)
79#define INFINIPATH_S_PIOBUFAVAILUPD (1U << IPATH_S_PIOBUFAVAILUPD) 84#define INFINIPATH_S_PIOBUFAVAILUPD (1U << IPATH_S_PIOBUFAVAILUPD)
80#define INFINIPATH_S_PIOENABLE (1U << IPATH_S_PIOENABLE) 85#define INFINIPATH_S_PIOENABLE (1U << IPATH_S_PIOENABLE)
86#define INFINIPATH_S_SDMAINTENABLE (1U << IPATH_S_SDMAINTENABLE)
87#define INFINIPATH_S_SDMASINGLEDESCRIPTOR \
88 (1U << IPATH_S_SDMASINGLEDESCRIPTOR)
89#define INFINIPATH_S_SDMAENABLE (1U << IPATH_S_SDMAENABLE)
90#define INFINIPATH_S_SDMAHALT (1U << IPATH_S_SDMAHALT)
81#define INFINIPATH_S_DISARM (1U << IPATH_S_DISARM) 91#define INFINIPATH_S_DISARM (1U << IPATH_S_DISARM)
82 92
83/* kr_rcvctrl bits */ 93/* kr_rcvctrl bits that are the same on multiple chips */
84#define INFINIPATH_R_PORTENABLE_SHIFT 0 94#define INFINIPATH_R_PORTENABLE_SHIFT 0
85#define INFINIPATH_R_QPMAP_ENABLE (1ULL << 38) 95#define INFINIPATH_R_QPMAP_ENABLE (1ULL << 38)
86 96
87/* kr_intstatus, kr_intclear, kr_intmask bits */ 97/* kr_intstatus, kr_intclear, kr_intmask bits */
88#define INFINIPATH_I_RCVURG_SHIFT 0 98#define INFINIPATH_I_SDMAINT 0x8000000000000000ULL
89#define INFINIPATH_I_RCVAVAIL_SHIFT 12 99#define INFINIPATH_I_SDMADISABLED 0x4000000000000000ULL
90#define INFINIPATH_I_ERROR 0x80000000 100#define INFINIPATH_I_ERROR 0x0000000080000000ULL
91#define INFINIPATH_I_SPIOSENT 0x40000000 101#define INFINIPATH_I_SPIOSENT 0x0000000040000000ULL
92#define INFINIPATH_I_SPIOBUFAVAIL 0x20000000 102#define INFINIPATH_I_SPIOBUFAVAIL 0x0000000020000000ULL
93#define INFINIPATH_I_GPIO 0x10000000 103#define INFINIPATH_I_GPIO 0x0000000010000000ULL
104#define INFINIPATH_I_JINT 0x0000000004000000ULL
94 105
95/* kr_errorstatus, kr_errorclear, kr_errormask bits */ 106/* kr_errorstatus, kr_errorclear, kr_errormask bits */
96#define INFINIPATH_E_RFORMATERR 0x0000000000000001ULL 107#define INFINIPATH_E_RFORMATERR 0x0000000000000001ULL
97#define INFINIPATH_E_RVCRC 0x0000000000000002ULL 108#define INFINIPATH_E_RVCRC 0x0000000000000002ULL
98#define INFINIPATH_E_RICRC 0x0000000000000004ULL 109#define INFINIPATH_E_RICRC 0x0000000000000004ULL
99#define INFINIPATH_E_RMINPKTLEN 0x0000000000000008ULL 110#define INFINIPATH_E_RMINPKTLEN 0x0000000000000008ULL
100#define INFINIPATH_E_RMAXPKTLEN 0x0000000000000010ULL 111#define INFINIPATH_E_RMAXPKTLEN 0x0000000000000010ULL
101#define INFINIPATH_E_RLONGPKTLEN 0x0000000000000020ULL 112#define INFINIPATH_E_RLONGPKTLEN 0x0000000000000020ULL
102#define INFINIPATH_E_RSHORTPKTLEN 0x0000000000000040ULL 113#define INFINIPATH_E_RSHORTPKTLEN 0x0000000000000040ULL
103#define INFINIPATH_E_RUNEXPCHAR 0x0000000000000080ULL 114#define INFINIPATH_E_RUNEXPCHAR 0x0000000000000080ULL
104#define INFINIPATH_E_RUNSUPVL 0x0000000000000100ULL 115#define INFINIPATH_E_RUNSUPVL 0x0000000000000100ULL
105#define INFINIPATH_E_REBP 0x0000000000000200ULL 116#define INFINIPATH_E_REBP 0x0000000000000200ULL
106#define INFINIPATH_E_RIBFLOW 0x0000000000000400ULL 117#define INFINIPATH_E_RIBFLOW 0x0000000000000400ULL
107#define INFINIPATH_E_RBADVERSION 0x0000000000000800ULL 118#define INFINIPATH_E_RBADVERSION 0x0000000000000800ULL
108#define INFINIPATH_E_RRCVEGRFULL 0x0000000000001000ULL 119#define INFINIPATH_E_RRCVEGRFULL 0x0000000000001000ULL
109#define INFINIPATH_E_RRCVHDRFULL 0x0000000000002000ULL 120#define INFINIPATH_E_RRCVHDRFULL 0x0000000000002000ULL
110#define INFINIPATH_E_RBADTID 0x0000000000004000ULL 121#define INFINIPATH_E_RBADTID 0x0000000000004000ULL
111#define INFINIPATH_E_RHDRLEN 0x0000000000008000ULL 122#define INFINIPATH_E_RHDRLEN 0x0000000000008000ULL
112#define INFINIPATH_E_RHDR 0x0000000000010000ULL 123#define INFINIPATH_E_RHDR 0x0000000000010000ULL
113#define INFINIPATH_E_RIBLOSTLINK 0x0000000000020000ULL 124#define INFINIPATH_E_RIBLOSTLINK 0x0000000000020000ULL
114#define INFINIPATH_E_SMINPKTLEN 0x0000000020000000ULL 125#define INFINIPATH_E_SENDSPECIALTRIGGER 0x0000000008000000ULL
115#define INFINIPATH_E_SMAXPKTLEN 0x0000000040000000ULL 126#define INFINIPATH_E_SDMADISABLED 0x0000000010000000ULL
116#define INFINIPATH_E_SUNDERRUN 0x0000000080000000ULL 127#define INFINIPATH_E_SMINPKTLEN 0x0000000020000000ULL
117#define INFINIPATH_E_SPKTLEN 0x0000000100000000ULL 128#define INFINIPATH_E_SMAXPKTLEN 0x0000000040000000ULL
118#define INFINIPATH_E_SDROPPEDSMPPKT 0x0000000200000000ULL 129#define INFINIPATH_E_SUNDERRUN 0x0000000080000000ULL
119#define INFINIPATH_E_SDROPPEDDATAPKT 0x0000000400000000ULL 130#define INFINIPATH_E_SPKTLEN 0x0000000100000000ULL
120#define INFINIPATH_E_SPIOARMLAUNCH 0x0000000800000000ULL 131#define INFINIPATH_E_SDROPPEDSMPPKT 0x0000000200000000ULL
121#define INFINIPATH_E_SUNEXPERRPKTNUM 0x0000001000000000ULL 132#define INFINIPATH_E_SDROPPEDDATAPKT 0x0000000400000000ULL
122#define INFINIPATH_E_SUNSUPVL 0x0000002000000000ULL 133#define INFINIPATH_E_SPIOARMLAUNCH 0x0000000800000000ULL
123#define INFINIPATH_E_IBSTATUSCHANGED 0x0001000000000000ULL 134#define INFINIPATH_E_SUNEXPERRPKTNUM 0x0000001000000000ULL
124#define INFINIPATH_E_INVALIDADDR 0x0002000000000000ULL 135#define INFINIPATH_E_SUNSUPVL 0x0000002000000000ULL
125#define INFINIPATH_E_RESET 0x0004000000000000ULL 136#define INFINIPATH_E_SENDBUFMISUSE 0x0000004000000000ULL
126#define INFINIPATH_E_HARDWARE 0x0008000000000000ULL 137#define INFINIPATH_E_SDMAGENMISMATCH 0x0000008000000000ULL
138#define INFINIPATH_E_SDMAOUTOFBOUND 0x0000010000000000ULL
139#define INFINIPATH_E_SDMATAILOUTOFBOUND 0x0000020000000000ULL
140#define INFINIPATH_E_SDMABASE 0x0000040000000000ULL
141#define INFINIPATH_E_SDMA1STDESC 0x0000080000000000ULL
142#define INFINIPATH_E_SDMARPYTAG 0x0000100000000000ULL
143#define INFINIPATH_E_SDMADWEN 0x0000200000000000ULL
144#define INFINIPATH_E_SDMAMISSINGDW 0x0000400000000000ULL
145#define INFINIPATH_E_SDMAUNEXPDATA 0x0000800000000000ULL
146#define INFINIPATH_E_IBSTATUSCHANGED 0x0001000000000000ULL
147#define INFINIPATH_E_INVALIDADDR 0x0002000000000000ULL
148#define INFINIPATH_E_RESET 0x0004000000000000ULL
149#define INFINIPATH_E_HARDWARE 0x0008000000000000ULL
150#define INFINIPATH_E_SDMADESCADDRMISALIGN 0x0010000000000000ULL
151#define INFINIPATH_E_INVALIDEEPCMD 0x0020000000000000ULL
127 152
128/* 153/*
129 * this is used to print "common" packet errors only when the 154 * this is used to print "common" packet errors only when the
@@ -134,6 +159,17 @@
134 | INFINIPATH_E_RICRC | INFINIPATH_E_RSHORTPKTLEN \ 159 | INFINIPATH_E_RICRC | INFINIPATH_E_RSHORTPKTLEN \
135 | INFINIPATH_E_REBP ) 160 | INFINIPATH_E_REBP )
136 161
162/* Convenience for decoding Send DMA errors */
163#define INFINIPATH_E_SDMAERRS ( \
164 INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND | \
165 INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE | \
166 INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG | \
167 INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW | \
168 INFINIPATH_E_SDMAUNEXPDATA | \
169 INFINIPATH_E_SDMADESCADDRMISALIGN | \
170 INFINIPATH_E_SDMADISABLED | \
171 INFINIPATH_E_SENDBUFMISUSE)
172
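/*
 * Editorial note, not part of this patch: the aggregate mask above is
 * just the OR of the individual Send DMA error bits, intended for
 * gating SDMA-specific handling, along the lines of
 *
 *	if (errs & INFINIPATH_E_SDMAERRS)
 *		handle_sdma_errs(dd, errs);	(handler name hypothetical)
 */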
137/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ 173/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
138/* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo 174/* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo
139 * RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2: expTID, 3: eagerTID 175 * RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2: expTID, 3: eagerTID
@@ -158,7 +194,7 @@
158#define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO 0x40ULL 194#define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO 0x40ULL
159/* waldo specific -- find the rest in ipath_6110.c */ 195/* waldo specific -- find the rest in ipath_6110.c */
160#define INFINIPATH_HWE_RXDSYNCMEMPARITYERR 0x0000000400000000ULL 196#define INFINIPATH_HWE_RXDSYNCMEMPARITYERR 0x0000000400000000ULL
161/* monty specific -- find the rest in ipath_6120.c */ 197/* 6120/7220 specific -- find the rest in ipath_6120.c and ipath_7220.c */
162#define INFINIPATH_HWE_MEMBISTFAILED 0x0040000000000000ULL 198#define INFINIPATH_HWE_MEMBISTFAILED 0x0040000000000000ULL
163 199
164/* kr_hwdiagctrl bits */ 200/* kr_hwdiagctrl bits */
@@ -185,8 +221,8 @@
185#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3 221#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3
186#define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16 222#define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16
187#define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL 223#define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL
188#define INFINIPATH_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */ 224#define INFINIPATH_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
189#define INFINIPATH_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */ 225#define INFINIPATH_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
190#define INFINIPATH_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */ 226#define INFINIPATH_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
191#define INFINIPATH_IBCC_LINKCMD_SHIFT 18 227#define INFINIPATH_IBCC_LINKCMD_SHIFT 18
192#define INFINIPATH_IBCC_MAXPKTLEN_MASK 0x7FFULL 228#define INFINIPATH_IBCC_MAXPKTLEN_MASK 0x7FFULL
@@ -201,10 +237,9 @@
201#define INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE 0x4000000000000000ULL 237#define INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE 0x4000000000000000ULL
202 238
203/* kr_ibcstatus bits */ 239/* kr_ibcstatus bits */
204#define INFINIPATH_IBCS_LINKTRAININGSTATE_MASK 0xF
205#define INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT 0 240#define INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT 0
206#define INFINIPATH_IBCS_LINKSTATE_MASK 0x7 241#define INFINIPATH_IBCS_LINKSTATE_MASK 0x7
207#define INFINIPATH_IBCS_LINKSTATE_SHIFT 4 242
208#define INFINIPATH_IBCS_TXREADY 0x40000000 243#define INFINIPATH_IBCS_TXREADY 0x40000000
209#define INFINIPATH_IBCS_TXCREDITOK 0x80000000 244#define INFINIPATH_IBCS_TXCREDITOK 0x80000000
210/* link training states (shift by 245/* link training states (shift by
@@ -222,30 +257,13 @@
222#define INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN 0x0c 257#define INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN 0x0c
223#define INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT 0x0e 258#define INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT 0x0e
224#define INFINIPATH_IBCS_LT_STATE_RECOVERIDLE 0x0f 259#define INFINIPATH_IBCS_LT_STATE_RECOVERIDLE 0x0f
225/* link state machine states (shift by INFINIPATH_IBCS_LINKSTATE_SHIFT) */ 260/* link state machine states (shift by ibcs_ls_shift) */
226#define INFINIPATH_IBCS_L_STATE_DOWN 0x0 261#define INFINIPATH_IBCS_L_STATE_DOWN 0x0
227#define INFINIPATH_IBCS_L_STATE_INIT 0x1 262#define INFINIPATH_IBCS_L_STATE_INIT 0x1
228#define INFINIPATH_IBCS_L_STATE_ARM 0x2 263#define INFINIPATH_IBCS_L_STATE_ARM 0x2
229#define INFINIPATH_IBCS_L_STATE_ACTIVE 0x3 264#define INFINIPATH_IBCS_L_STATE_ACTIVE 0x3
230#define INFINIPATH_IBCS_L_STATE_ACT_DEFER 0x4 265#define INFINIPATH_IBCS_L_STATE_ACT_DEFER 0x4
231 266
232/* combination link status states that we use with some frequency */
233#define IPATH_IBSTATE_MASK ((INFINIPATH_IBCS_LINKTRAININGSTATE_MASK \
234 << INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | \
235 (INFINIPATH_IBCS_LINKSTATE_MASK \
236 <<INFINIPATH_IBCS_LINKSTATE_SHIFT))
237#define IPATH_IBSTATE_INIT ((INFINIPATH_IBCS_L_STATE_INIT \
238 << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
239 (INFINIPATH_IBCS_LT_STATE_LINKUP \
240 <<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
241#define IPATH_IBSTATE_ARM ((INFINIPATH_IBCS_L_STATE_ARM \
242 << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
243 (INFINIPATH_IBCS_LT_STATE_LINKUP \
244 <<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
245#define IPATH_IBSTATE_ACTIVE ((INFINIPATH_IBCS_L_STATE_ACTIVE \
246 << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
247 (INFINIPATH_IBCS_LT_STATE_LINKUP \
248 <<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
249 267
250/* kr_extstatus bits */ 268/* kr_extstatus bits */
251#define INFINIPATH_EXTS_SERDESPLLLOCK 0x1 269#define INFINIPATH_EXTS_SERDESPLLLOCK 0x1
@@ -286,8 +304,7 @@
286/* L1 Power down; use with RXDETECT, Otherwise not used on IB side */ 304/* L1 Power down; use with RXDETECT, Otherwise not used on IB side */
287#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL 305#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL
288 306
289/* kr_xgxsconfig bits */ 307/* common kr_xgxsconfig bits (or safe in all, even if not implemented) */
290#define INFINIPATH_XGXS_RESET 0x7ULL
291#define INFINIPATH_XGXS_RX_POL_SHIFT 19 308#define INFINIPATH_XGXS_RX_POL_SHIFT 19
292#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL 309#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
293 310
@@ -417,6 +434,29 @@ struct ipath_kregs {
417 ipath_kreg kr_pcieq1serdesconfig0; 434 ipath_kreg kr_pcieq1serdesconfig0;
418 ipath_kreg kr_pcieq1serdesconfig1; 435 ipath_kreg kr_pcieq1serdesconfig1;
419 ipath_kreg kr_pcieq1serdesstatus; 436 ipath_kreg kr_pcieq1serdesstatus;
437 ipath_kreg kr_hrtbt_guid;
438 ipath_kreg kr_ibcddrctrl;
439 ipath_kreg kr_ibcddrstatus;
440 ipath_kreg kr_jintreload;
441
442 /* send dma related regs */
443 ipath_kreg kr_senddmabase;
444 ipath_kreg kr_senddmalengen;
445 ipath_kreg kr_senddmatail;
446 ipath_kreg kr_senddmahead;
447 ipath_kreg kr_senddmaheadaddr;
448 ipath_kreg kr_senddmabufmask0;
449 ipath_kreg kr_senddmabufmask1;
450 ipath_kreg kr_senddmabufmask2;
451 ipath_kreg kr_senddmastatus;
452
453 /* SerDes related regs (IBA7220-only) */
454 ipath_kreg kr_ibserdesctrl;
455 ipath_kreg kr_ib_epbacc;
456 ipath_kreg kr_ib_epbtrans;
457 ipath_kreg kr_pcie_epbacc;
458 ipath_kreg kr_pcie_epbtrans;
459 ipath_kreg kr_ib_ddsrxeq;
420}; 460};
421 461
422struct ipath_cregs { 462struct ipath_cregs {
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index a59bdbd0ed87..8ac5c1d82ccd 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -310,7 +310,7 @@ again:
310 switch (wqe->wr.opcode) { 310 switch (wqe->wr.opcode) {
311 case IB_WR_SEND_WITH_IMM: 311 case IB_WR_SEND_WITH_IMM:
312 wc.wc_flags = IB_WC_WITH_IMM; 312 wc.wc_flags = IB_WC_WITH_IMM;
313 wc.imm_data = wqe->wr.imm_data; 313 wc.imm_data = wqe->wr.ex.imm_data;
314 /* FALLTHROUGH */ 314 /* FALLTHROUGH */
315 case IB_WR_SEND: 315 case IB_WR_SEND:
316 if (!ipath_get_rwqe(qp, 0)) { 316 if (!ipath_get_rwqe(qp, 0)) {
@@ -339,7 +339,7 @@ again:
339 goto err; 339 goto err;
340 } 340 }
341 wc.wc_flags = IB_WC_WITH_IMM; 341 wc.wc_flags = IB_WC_WITH_IMM;
342 wc.imm_data = wqe->wr.imm_data; 342 wc.imm_data = wqe->wr.ex.imm_data;
343 if (!ipath_get_rwqe(qp, 1)) 343 if (!ipath_get_rwqe(qp, 1))
344 goto rnr_nak; 344 goto rnr_nak;
345 /* FALLTHROUGH */ 345 /* FALLTHROUGH */
@@ -483,14 +483,16 @@ done:
483 483
484static void want_buffer(struct ipath_devdata *dd) 484static void want_buffer(struct ipath_devdata *dd)
485{ 485{
486 unsigned long flags; 486 if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA)) {
487 487 unsigned long flags;
488 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); 488
489 dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL; 489 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
490 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 490 dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
491 dd->ipath_sendctrl); 491 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
492 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 492 dd->ipath_sendctrl);
493 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); 493 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
494 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
495 }
494} 496}
495 497
496/** 498/**
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220.c b/drivers/infiniband/hw/ipath/ipath_sd7220.c
new file mode 100644
index 000000000000..aa47eb549520
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_sd7220.c
@@ -0,0 +1,1462 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33/*
34 * This file contains all of the code that is specific to the SerDes
35 * on the InfiniPath 7220 chip.
36 */
37
38#include <linux/pci.h>
39#include <linux/delay.h>
40
41#include "ipath_kernel.h"
42#include "ipath_registers.h"
43#include "ipath_7220.h"
44
45/*
46 * The IBSerDesMappTable is a memory that holds values to be stored in
47 * various SerDes registers by IBC. It is not part of the normal kregs
48 * map and is used in exactly one place, hence the #define below.
49 */
50#define KR_IBSerDesMappTable (0x94000 / (sizeof(uint64_t)))
51
52/*
53 * Below used for sdnum parameter, selecting one of the two sections
54 * used for PCIe, or the single SerDes used for IB.
55 */
56#define PCIE_SERDES0 0
57#define PCIE_SERDES1 1
58
59/*
60 * The EPB requires addressing in a particular form. EPB_LOC() is intended
61 * to make #definitions a little more readable.
62 */
63#define EPB_ADDR_SHF 8
64#define EPB_LOC(chn, elt, reg) \
65 (((elt & 0xf) | ((chn & 7) << 4) | ((reg & 0x3f) << 9)) << \
66 EPB_ADDR_SHF)
67#define EPB_IB_QUAD0_CS_SHF (25)
68#define EPB_IB_QUAD0_CS (1U << EPB_IB_QUAD0_CS_SHF)
69#define EPB_IB_UC_CS_SHF (26)
70#define EPB_PCIE_UC_CS_SHF (27)
71#define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
72
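/*
 * Worked example of the packing (editorial, not in the source):
 *	EPB_LOC(2, 7, 3) = ((7) | (2 << 4) | (3 << 9)) << 8
 *	                 = 0x627 << 8 = 0x62700
 * so the element lands in bits 8-11, the channel in bits 12-14, and
 * the register in bits 17-22, leaving the low byte free for the data
 * of an EPB transaction.
 */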
73/* Forward declarations. */
74static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
75 u32 data, u32 mask);
76static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
77 int mask);
78static int ipath_sd_trimdone_poll(struct ipath_devdata *dd);
79static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
80 const char *where);
81static int ipath_sd_setvals(struct ipath_devdata *dd);
82static int ipath_sd_early(struct ipath_devdata *dd);
83static int ipath_sd_dactrim(struct ipath_devdata *dd);
84/* Set the registers that IBC may muck with to their default "preset" values */
85int ipath_sd7220_presets(struct ipath_devdata *dd);
86static int ipath_internal_presets(struct ipath_devdata *dd);
87/* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */
88static int ipath_sd_trimself(struct ipath_devdata *dd, int val);
89static int epb_access(struct ipath_devdata *dd, int sdnum, int claim);
90
91void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup);
92
93/*
94 * Below keeps track of whether the "once per power-on" initialization has
95 * been done, because uC code Version 1.32.17 or higher allows the uC to
96 * be reset at will, and Automatic Equalization may require it. So the
97 * state of the reset "pin", as reflected in was_reset parameter to
98 * ipath_sd7220_init() is no longer valid. Instead, we check for the
99 * actual uC code having been loaded.
100 */
101static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd)
102{
103 if (!dd->serdes_first_init_done && (ipath_sd7220_ib_vfy(dd) > 0))
104 dd->serdes_first_init_done = 1;
105 return dd->serdes_first_init_done;
106}
107
108/* repeat #define for local use. "Real" #define is in ipath_iba7220.c */
109#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
110#define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
111#define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
112#define UC_PAR_CLR_D 8
113#define UC_PAR_CLR_M 0xC
114#define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
115#define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
116
117void ipath_sd7220_clr_ibpar(struct ipath_devdata *dd)
118{
119 int ret;
120
121 /* clear, then re-enable parity errs */
122 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
123 UC_PAR_CLR_D, UC_PAR_CLR_M);
124 if (ret < 0) {
125 ipath_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
126 goto bail;
127 }
128 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
129 UC_PAR_CLR_M);
130
131 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
132 udelay(4);
133 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
134 INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
135 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
136bail:
137 return;
138}
139
140/*
141 * After a reset or other unusual event, the epb interface may need
142 * to be re-synchronized, between the host and the uC.
143 * returns <0 for failure to resync within IBSD_RESYNC_TRIES (not expected)
144 */
145#define IBSD_RESYNC_TRIES 3
146#define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
147#define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
148
149static int ipath_resync_ibepb(struct ipath_devdata *dd)
150{
151 int ret, pat, tries, chn;
152 u32 loc;
153
154 ret = -1;
155 chn = 0;
156 for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
157 loc = IB_PGUDP(chn);
158 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
159 if (ret < 0) {
160 ipath_dev_err(dd, "Failed read in resync\n");
161 continue;
162 }
163 if (ret != 0xF0 && ret != 0x55 && tries == 0)
164 ipath_dev_err(dd, "unexpected pattern in resync\n");
165 pat = ret ^ 0xA5; /* alternate F0 and 55 */
166 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
167 if (ret < 0) {
168 ipath_dev_err(dd, "Failed write in resync\n");
169 continue;
170 }
171 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
172 if (ret < 0) {
173 ipath_dev_err(dd, "Failed re-read in resync\n");
174 continue;
175 }
176 if (ret != pat) {
177 ipath_dev_err(dd, "Failed compare1 in resync\n");
178 continue;
179 }
180 loc = IB_CMUDONE(chn);
181 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
182 if (ret < 0) {
183 ipath_dev_err(dd, "Failed CMUDONE rd in resync\n");
184 continue;
185 }
186 if ((ret & 0x70) != ((chn << 4) | 0x40)) {
187 ipath_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
188 ret, chn);
189 continue;
190 }
191 if (++chn == 4)
192 break; /* Success */
193 }
194 ipath_cdbg(VERBOSE, "Resync in %d tries\n", tries);
195 return (ret > 0) ? 0 : ret;
196}
197
198/*
199 * Localize the stuff that should be done to change IB uC reset
200 * returns <0 for errors.
201 */
202static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst)
203{
204 u64 rst_val;
205 int ret = 0;
206 unsigned long flags;
207
208 rst_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
209 if (assert_rst) {
210 /*
211 * Vendor recommends "interrupting" uC before reset, to
212 * minimize possible glitches.
213 */
214 spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
215 epb_access(dd, IB_7220_SERDES, 1);
216 rst_val |= 1ULL;
217 /* Squelch possible parity error from _asserting_ reset */
218 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
219 dd->ipath_hwerrmask &
220 ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
221 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
222 /* flush write, delay to ensure it took effect */
223 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
224 udelay(2);
225 /* once it's reset, can remove interrupt */
226 epb_access(dd, IB_7220_SERDES, -1);
227 spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
228 } else {
229 /*
230 * Before we de-assert reset, we need to deal with
231 * possible glitch on the Parity-error line.
232 * Suppress it around the reset, both in chip-level
233 * hwerrmask and in IB uC control reg. uC will allow
234 * it again during startup.
235 */
236 u64 val;
237 rst_val &= ~(1ULL);
238 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
239 dd->ipath_hwerrmask &
240 ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
241
242 ret = ipath_resync_ibepb(dd);
243 if (ret < 0)
244 ipath_dev_err(dd, "unable to re-sync IB EPB\n");
245
246 /* set uC control regs to suppress parity errs */
247 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
248 if (ret < 0)
249 goto bail;
250 /* IB uC code past Version 1.32.17 allow suppression of wdog */
251 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
252 0x80);
253 if (ret < 0) {
254 ipath_dev_err(dd, "Failed to set WDOG disable\n");
255 goto bail;
256 }
257 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
258 /* flush write, delay for startup */
259 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
260 udelay(1);
261 /* clear, then re-enable parity errs */
262 ipath_sd7220_clr_ibpar(dd);
263 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
264 if (val & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR) {
265 ipath_dev_err(dd, "IBUC Parity still set after RST\n");
266 dd->ipath_hwerrmask &=
267 ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
268 }
269 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
270 dd->ipath_hwerrmask);
271 }
272
273bail:
274 return ret;
275}
276
277static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
278 const char *where)
279{
280 int ret, chn, baduns;
281 u64 val;
282
283 if (!where)
284 where = "?";
285
286 /* give time for reset to settle out in EPB */
287 udelay(2);
288
289 ret = ipath_resync_ibepb(dd);
290 if (ret < 0)
291 ipath_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
292
293 /* Do "sacrificial read" to get EPB in sane state after reset */
294 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
295 if (ret < 0)
296 ipath_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
297
298 /* Check/show "summary" Trim-done bit in IBCStatus */
299 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
300 if (val & (1ULL << 11))
301 ipath_cdbg(VERBOSE, "IBCS TRIMDONE set (%s)\n", where);
302 else
303 ipath_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
304
305 udelay(2);
306
307 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
308 if (ret < 0)
309 ipath_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
310 udelay(10);
311
312 baduns = 0;
313
314 for (chn = 3; chn >= 0; --chn) {
315 /* Read CTRL reg for each channel to check TRIMDONE */
316 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
317 IB_CTRL2(chn), 0, 0);
318 if (ret < 0)
319 ipath_dev_err(dd, "Failed checking TRIMDONE, chn %d"
320 " (%s)\n", chn, where);
321
322 if (!(ret & 0x10)) {
323 int probe;
324 baduns |= (1 << chn);
325 ipath_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
326 " (%s)\n", chn, ret, where);
327 probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
328 IB_PGUDP(0), 0, 0);
329 ipath_dev_err(dd, "probe is %d (%02X)\n",
330 probe, probe);
331 probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
332 IB_CTRL2(chn), 0, 0);
333 ipath_dev_err(dd, "re-read: %d (%02X)\n",
334 probe, probe);
335 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
336 IB_CTRL2(chn), 0x10, 0x10);
337 if (ret < 0)
338 ipath_dev_err(dd,
339 "Err on TRIMDONE rewrite1\n");
340 }
341 }
342 for (chn = 3; chn >= 0; --chn) {
 343 /* Re-set TRIMDONE on any channel that lost it */
344 if (baduns & (1 << chn)) {
345 ipath_dev_err(dd,
346 "Reseting TRIMDONE on chn %d (%s)\n",
347 chn, where);
348 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
349 IB_CTRL2(chn), 0x10, 0x10);
350 if (ret < 0)
351 ipath_dev_err(dd, "Failed re-setting "
352 "TRIMDONE, chn %d (%s)\n",
353 chn, where);
354 }
355 }
356}
357
358/*
359 * Below is portion of IBA7220-specific bringup_serdes() that actually
360 * deals with registers and memory within the SerDes itself.
361 * Post IB uC code version 1.32.17, was_reset being 1 is not really
362 * informative, so we double-check.
363 */
364int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset)
365{
366 int ret = 1; /* default to failure */
367 int first_reset;
368 int val_stat;
369
370 if (!was_reset) {
371 /* entered with reset not asserted, we need to do it */
372 ipath_ibsd_reset(dd, 1);
373 ipath_sd_trimdone_monitor(dd, "Driver-reload");
374 }
375
376 /* Substitute our deduced value for was_reset */
377 ret = ipath_ibsd_ucode_loaded(dd);
378 if (ret < 0) {
379 ret = 1;
380 goto done;
381 }
382 first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
383
384 /*
385 * Alter some regs per vendor latest doc, reset-defaults
386 * are not right for IB.
387 */
388 ret = ipath_sd_early(dd);
389 if (ret < 0) {
390 ipath_dev_err(dd, "Failed to set IB SERDES early defaults\n");
391 ret = 1;
392 goto done;
393 }
394
395 /*
396 * Set DAC manual trim IB.
397 * We only do this once after chip has been reset (usually
398 * same as once per system boot).
399 */
400 if (first_reset) {
401 ret = ipath_sd_dactrim(dd);
402 if (ret < 0) {
403 ipath_dev_err(dd, "Failed IB SERDES DAC trim\n");
404 ret = 1;
405 goto done;
406 }
407 }
408
409 /*
410 * Set various registers (DDS and RXEQ) that will be
411 * controlled by IBC (in 1.2 mode) to reasonable preset values
412 * Calling the "internal" version avoids the "check for needed"
413 * and "trimdone monitor" that might be counter-productive.
414 */
415 ret = ipath_internal_presets(dd);
416 if (ret < 0) {
417 ipath_dev_err(dd, "Failed to set IB SERDES presets\n");
418 ret = 1;
419 goto done;
420 }
421 ret = ipath_sd_trimself(dd, 0x80);
422 if (ret < 0) {
423 ipath_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
424 ret = 1;
425 goto done;
426 }
427
428 /* Load image, then try to verify */
429 ret = 0; /* Assume success */
430 if (first_reset) {
431 int vfy;
432 int trim_done;
433 ipath_dbg("SerDes uC was reset, reloading PRAM\n");
434 ret = ipath_sd7220_ib_load(dd);
435 if (ret < 0) {
436 ipath_dev_err(dd, "Failed to load IB SERDES image\n");
437 ret = 1;
438 goto done;
439 }
440
441 /* Loaded image, try to verify */
442 vfy = ipath_sd7220_ib_vfy(dd);
443 if (vfy != ret) {
444 ipath_dev_err(dd, "SERDES PRAM VFY failed\n");
445 ret = 1;
446 goto done;
447 }
448 /*
449 * Loaded and verified. Almost good...
450 * hold "success" in ret
451 */
452 ret = 0;
453
454 /*
455 * Prev steps all worked, continue bringup
456 * De-assert RESET to uC, only in first reset, to allow
457 * trimming.
458 *
459 * Since our default setup sets START_EQ1 to
460 * PRESET, we need to clear that for this very first run.
461 */
462 ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
463 if (ret < 0) {
464 ipath_dev_err(dd, "Failed clearing START_EQ1\n");
465 ret = 1;
466 goto done;
467 }
468
469 ipath_ibsd_reset(dd, 0);
470 /*
471 * If this is not the first reset, trimdone should be set
472 * already.
473 */
474 trim_done = ipath_sd_trimdone_poll(dd);
475 /*
476 * Whether or not trimdone succeeded, we need to put the
477 * uC back into reset to avoid a possible fight with the
478 * IBC state-machine.
479 */
480 ipath_ibsd_reset(dd, 1);
481
482 if (!trim_done) {
483 ipath_dev_err(dd, "No TRIMDONE seen\n");
484 ret = 1;
485 goto done;
486 }
487
488 ipath_sd_trimdone_monitor(dd, "First-reset");
489 /* Remember so we do not re-do the load, dactrim, etc. */
490 dd->serdes_first_init_done = 1;
491 }
492 /*
493 * Setup for channel training and load values for
494 * RxEq and DDS in tables used by IBC in IB1.2 mode
495 */
496
497 val_stat = ipath_sd_setvals(dd);
498 if (val_stat < 0)
499 ret = 1;
500done:
501 /* start relock timer regardless, but start at 1 second */
502 ipath_set_relock_poll(dd, -1);
503 return ret;
504}
505
506#define EPB_ACC_REQ 1
507#define EPB_ACC_GNT 0x100
508#define EPB_DATA_MASK 0xFF
509#define EPB_RD (1ULL << 24)
510#define EPB_TRANS_RDY (1ULL << 31)
511#define EPB_TRANS_ERR (1ULL << 30)
512#define EPB_TRANS_TRIES 5
513
514/*
515 * query, claim, release ownership of the EPB (External Parallel Bus)
516 * for a specified SERDES.
517 * the "claim" parameter is >0 to claim, <0 to release, 0 to query.
518 * Returns <0 for errors, >0 if we had ownership, else 0.
519 */
520static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
521{
522 u16 acc;
523 u64 accval;
524 int owned = 0;
525 u64 oct_sel = 0;
526
527 switch (sdnum) {
528 case IB_7220_SERDES :
529 /*
530 * The IB SERDES "ownership" is fairly simple. A single each
531 * request/grant.
532 */
533 acc = dd->ipath_kregs->kr_ib_epbacc;
534 break;
535 case PCIE_SERDES0 :
536 case PCIE_SERDES1 :
537 /* PCIe SERDES has two "octants", need to select which */
538 acc = dd->ipath_kregs->kr_pcie_epbacc;
539 oct_sel = (2 << (sdnum - PCIE_SERDES0));
540 break;
541 default :
542 return 0;
543 }
544
545 /* Make sure any outstanding transaction was seen */
546 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
547 udelay(15);
548
549 accval = ipath_read_kreg32(dd, acc);
550
551 owned = !!(accval & EPB_ACC_GNT);
552 if (claim < 0) {
553 /* Need to release */
554 u64 pollval;
555 /*
556 * The only writeable bits are the request and CS.
557 * Both should be clear
558 */
559 u64 newval = 0;
560 ipath_write_kreg(dd, acc, newval);
561 /* First read after write is not trustworthy */
562 pollval = ipath_read_kreg32(dd, acc);
563 udelay(5);
564 pollval = ipath_read_kreg32(dd, acc);
565 if (pollval & EPB_ACC_GNT)
566 owned = -1;
567 } else if (claim > 0) {
568 /* Need to claim */
569 u64 pollval;
570 u64 newval = EPB_ACC_REQ | oct_sel;
571 ipath_write_kreg(dd, acc, newval);
572 /* First read after write is not trustworthy */
573 pollval = ipath_read_kreg32(dd, acc);
574 udelay(5);
575 pollval = ipath_read_kreg32(dd, acc);
576 if (!(pollval & EPB_ACC_GNT))
577 owned = -1;
578 }
579 return owned;
580}
581
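/*
 * Editorial sketch of the claim/use/release pattern implied above
 * (hypothetical caller, mirroring ipath_sd7220_reg_mod() below):
 *
 *	if (epb_access(dd, IB_7220_SERDES, 1) < 0)
 *		return -1;			claim failed
 *	... perform EPB transactions ...
 *	if (epb_access(dd, IB_7220_SERDES, -1) < 0)
 *		ret = -1;			release failed
 */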
582/*
583 * Lemma to deal with race condition of write..read to epb regs
584 */
585static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
586{
587 int tries;
588 u64 transval;
589
590
591 ipath_write_kreg(dd, reg, i_val);
592 /* Throw away first read, as RDY bit may be stale */
593 transval = ipath_read_kreg64(dd, reg);
594
595 for (tries = EPB_TRANS_TRIES; tries; --tries) {
596 transval = ipath_read_kreg32(dd, reg);
597 if (transval & EPB_TRANS_RDY)
598 break;
599 udelay(5);
600 }
601 if (transval & EPB_TRANS_ERR)
602 return -1;
603 if (tries > 0 && o_vp)
604 *o_vp = transval;
605 return tries;
606}
607
608/**
609 *
610 * ipath_sd7220_reg_mod - modify SERDES register
611 * @dd: the infinipath device
612 * @sdnum: which SERDES to access
613 * @loc: location - channel, element, register, as packed by EPB_LOC() macro.
614 * @wd: Write Data - value to set in register
615 * @mask: ones where data should be spliced into reg.
616 *
 617 * Basic register read/modify/write, with unneeded accesses elided. That is,
618 * a mask of zero will prevent write, while a mask of 0xFF will prevent read.
619 * returns current (presumed, if a write was done) contents of selected
620 * register, or <0 if errors.
621 */
622static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
623 u32 wd, u32 mask)
624{
625 u16 trans;
626 u64 transval;
627 int owned;
628 int tries, ret;
629 unsigned long flags;
630
631 switch (sdnum) {
632 case IB_7220_SERDES :
633 trans = dd->ipath_kregs->kr_ib_epbtrans;
634 break;
635 case PCIE_SERDES0 :
636 case PCIE_SERDES1 :
637 trans = dd->ipath_kregs->kr_pcie_epbtrans;
638 break;
639 default :
640 return -1;
641 }
642
643 /*
644 * All access is locked in software (vs other host threads) and
645 * hardware (vs uC access).
646 */
647 spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
648
649 owned = epb_access(dd, sdnum, 1);
650 if (owned < 0) {
651 spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
652 return -1;
653 }
654 ret = 0;
655 for (tries = EPB_TRANS_TRIES; tries; --tries) {
656 transval = ipath_read_kreg32(dd, trans);
657 if (transval & EPB_TRANS_RDY)
658 break;
659 udelay(5);
660 }
661
662 if (tries > 0) {
663 tries = 1; /* to make read-skip work */
664 if (mask != 0xFF) {
665 /*
666 * Not a pure write, so need to read.
667 * loc encodes chip-select as well as address
668 */
669 transval = loc | EPB_RD;
670 tries = epb_trans(dd, trans, transval, &transval);
671 }
672 if (tries > 0 && mask != 0) {
673 /*
674 * Not a pure read, so need to write.
675 */
676 wd = (wd & mask) | (transval & ~mask);
677 transval = loc | (wd & EPB_DATA_MASK);
678 tries = epb_trans(dd, trans, transval, &transval);
679 }
680 }
681 /* else, failed to see ready, what error-handling? */
682
683 /*
684 * Release bus. Failure is an error.
685 */
686 if (epb_access(dd, sdnum, -1) < 0)
687 ret = -1;
688 else
689 ret = transval & EPB_DATA_MASK;
690
691 spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
692 if (tries <= 0)
693 ret = -1;
694 return ret;
695}
696
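/*
 * Editorial sketch (not part of this patch) of the mask semantics
 * documented above; "loc" and "v" are placeholders:
 *
 *	cur = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);    mask 0 -> pure read
 *	ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, v, 0xFF); mask 0xFF -> pure write
 *	ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, v, 0x30); RMW: splice bits 4-5 only
 */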
697#define EPB_ROM_R (2)
698#define EPB_ROM_W (1)
699/*
700 * Below, all uC-related, use appropriate UC_CS, depending
701 * on which SerDes is used.
702 */
703#define EPB_UC_CTL EPB_LOC(6, 0, 0)
704#define EPB_MADDRL EPB_LOC(6, 0, 2)
705#define EPB_MADDRH EPB_LOC(6, 0, 3)
706#define EPB_ROMDATA EPB_LOC(6, 0, 4)
707#define EPB_RAMDATA EPB_LOC(6, 0, 5)
708
 709/* Transfer data to/from uC Program RAM of IB or PCIe SerDes */
710static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
711 u8 *buf, int cnt, int rd_notwr)
712{
713 u16 trans;
714 u64 transval;
715 u64 csbit;
716 int owned;
717 int tries;
718 int sofar;
719 int addr;
720 int ret;
721 unsigned long flags;
722 const char *op;
723
724 /* Pick appropriate transaction reg and "Chip select" for this serdes */
725 switch (sdnum) {
726 case IB_7220_SERDES :
727 csbit = 1ULL << EPB_IB_UC_CS_SHF;
728 trans = dd->ipath_kregs->kr_ib_epbtrans;
729 break;
730 case PCIE_SERDES0 :
731 case PCIE_SERDES1 :
732 /* PCIe SERDES has uC "chip select" in different bit, too */
733 csbit = 1ULL << EPB_PCIE_UC_CS_SHF;
734 trans = dd->ipath_kregs->kr_pcie_epbtrans;
735 break;
736 default :
737 return -1;
738 }
739
740 op = rd_notwr ? "Rd" : "Wr";
741 spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
742
743 owned = epb_access(dd, sdnum, 1);
744 if (owned < 0) {
745 spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
746 ipath_dbg("Could not get %s access to %s EPB: %X, loc %X\n",
747 op, (sdnum == IB_7220_SERDES) ? "IB" : "PCIe",
748 owned, loc);
749 return -1;
750 }
751
752 /*
753 * In future code, we may need to distinguish several address ranges,
754 * and select various memories based on this. For now, just trim
755 * "loc" (location including address and memory select) to
756 * "addr" (address within memory). we will only support PRAM
757 * The memory is 8KB.
758 */
759 addr = loc & 0x1FFF;
760 for (tries = EPB_TRANS_TRIES; tries; --tries) {
761 transval = ipath_read_kreg32(dd, trans);
762 if (transval & EPB_TRANS_RDY)
763 break;
764 udelay(5);
765 }
766
767 sofar = 0;
768 if (tries <= 0)
769 ipath_dbg("No initial RDY on EPB access request\n");
770 else {
771 /*
772 * Every "memory" access is doubly-indirect.
773 * We set two bytes of address, then read/write
 774 * one or more bytes of data.
775 */
776
777 /* First, we set control to "Read" or "Write" */
778 transval = csbit | EPB_UC_CTL |
779 (rd_notwr ? EPB_ROM_R : EPB_ROM_W);
780 tries = epb_trans(dd, trans, transval, &transval);
781 if (tries <= 0)
782 ipath_dbg("No EPB response to uC %s cmd\n", op);
783 while (tries > 0 && sofar < cnt) {
784 if (!sofar) {
785 /* Only set address at start of chunk */
786 int addrbyte = (addr + sofar) >> 8;
787 transval = csbit | EPB_MADDRH | addrbyte;
788 tries = epb_trans(dd, trans, transval,
789 &transval);
790 if (tries <= 0) {
791 ipath_dbg("No EPB response ADDRH\n");
792 break;
793 }
794 addrbyte = (addr + sofar) & 0xFF;
795 transval = csbit | EPB_MADDRL | addrbyte;
796 tries = epb_trans(dd, trans, transval,
797 &transval);
798 if (tries <= 0) {
799 ipath_dbg("No EPB response ADDRL\n");
800 break;
801 }
802 }
803
804 if (rd_notwr)
805 transval = csbit | EPB_ROMDATA | EPB_RD;
806 else
807 transval = csbit | EPB_ROMDATA | buf[sofar];
808 tries = epb_trans(dd, trans, transval, &transval);
809 if (tries <= 0) {
810 ipath_dbg("No EPB response DATA\n");
811 break;
812 }
813 if (rd_notwr)
814 buf[sofar] = transval & EPB_DATA_MASK;
815 ++sofar;
816 }
817 /* Finally, clear control-bit for Read or Write */
818 transval = csbit | EPB_UC_CTL;
819 tries = epb_trans(dd, trans, transval, &transval);
820 if (tries <= 0)
821 ipath_dbg("No EPB response to drop of uC %s cmd\n", op);
822 }
823
824 ret = sofar;
825 /* Release bus. Failure is an error */
826 if (epb_access(dd, sdnum, -1) < 0)
827 ret = -1;
828
829 spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
830 if (tries <= 0) {
831 ipath_dbg("SERDES PRAM %s failed after %d bytes\n", op, sofar);
832 ret = -1;
833 }
834 return ret;
835}
836
837#define PROG_CHUNK 64
838
839int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
840 u8 *img, int len, int offset)
841{
842 int cnt, sofar, req;
843
844 sofar = 0;
845 while (sofar < len) {
846 req = len - sofar;
847 if (req > PROG_CHUNK)
848 req = PROG_CHUNK;
849 cnt = ipath_sd7220_ram_xfer(dd, sdnum, offset + sofar,
850 img + sofar, req, 0);
851 if (cnt < req) {
852 sofar = -1;
853 break;
854 }
855 sofar += req;
856 }
857 return sofar;
858}
859
860#define VFY_CHUNK 64
861#define SD_PRAM_ERROR_LIMIT 42
862
863int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
864 const u8 *img, int len, int offset)
865{
866 int cnt, sofar, req, idx, errors;
867 unsigned char readback[VFY_CHUNK];
868
869 errors = 0;
870 sofar = 0;
871 while (sofar < len) {
872 req = len - sofar;
873 if (req > VFY_CHUNK)
874 req = VFY_CHUNK;
875 cnt = ipath_sd7220_ram_xfer(dd, sdnum, sofar + offset,
876 readback, req, 1);
877 if (cnt < req) {
878 /* failed in read itself */
879 sofar = -1;
880 break;
881 }
882 for (idx = 0; idx < cnt; ++idx) {
883 if (readback[idx] != img[idx+sofar])
884 ++errors;
885 }
886 sofar += cnt;
887 }
888 return errors ? -errors : sofar;
889}
890
891/* IRQ not set up at this point in init, so we poll. */
892#define IB_SERDES_TRIM_DONE (1ULL << 11)
893#define TRIM_TMO (30)
894
895static int ipath_sd_trimdone_poll(struct ipath_devdata *dd)
896{
897 int trim_tmo, ret;
898 uint64_t val;
899
900 /*
901 * Default to failure, so IBC will not start
902 * without IB_SERDES_TRIM_DONE.
903 */
904 ret = 0;
905 for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {
906 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
907 if (val & IB_SERDES_TRIM_DONE) {
908 ipath_cdbg(VERBOSE, "TRIMDONE after %d\n", trim_tmo);
909 ret = 1;
910 break;
911 }
912 msleep(10);
913 }
914 if (trim_tmo >= TRIM_TMO) {
915 ipath_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
916 ret = 0;
917 }
918 return ret;
919}
920
921#define TX_FAST_ELT (9)
922
923/*
924 * Set the "negotiation" values for SERDES. These are used by the IB1.2
 925 * link negotiation. The macros below are an attempt to keep the values a
926 * little more human-editable.
927 * First, values related to Drive De-emphasis Settings.
928 */
929
930#define NUM_DDS_REGS 6
931#define DDS_REG_MAP 0x76A910 /* LSB-first list of regs (in elt 9) to mod */
932
933#define DDS_VAL(amp_d, main_d, ipst_d, ipre_d, amp_s, main_s, ipst_s, ipre_s) \
934 { { ((amp_d & 0x1F) << 1) | 1, ((amp_s & 0x1F) << 1) | 1, \
935 (main_d << 3) | 4 | (ipre_d >> 2), \
936 (main_s << 3) | 4 | (ipre_s >> 2), \
937 ((ipst_d & 0xF) << 1) | ((ipre_d & 3) << 6) | 0x21, \
938 ((ipst_s & 0xF) << 1) | ((ipre_s & 3) << 6) | 0x21 } }
939
940static struct dds_init {
941 uint8_t reg_vals[NUM_DDS_REGS];
942} dds_init_vals[] = {
943 /* DDR(FDR) SDR(HDR) */
944 /* Vendor recommends below for 3m cable */
945#define DDS_3M 0
946 DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0),
947 DDS_VAL(31, 12, 15, 4, 31, 15, 15, 1),
948 DDS_VAL(31, 13, 15, 3, 31, 16, 15, 0),
949 DDS_VAL(31, 14, 15, 2, 31, 17, 14, 0),
950 DDS_VAL(31, 15, 15, 1, 31, 18, 13, 0),
951 DDS_VAL(31, 16, 15, 0, 31, 19, 12, 0),
952 DDS_VAL(31, 17, 14, 0, 31, 20, 11, 0),
953 DDS_VAL(31, 18, 13, 0, 30, 21, 10, 0),
954 DDS_VAL(31, 20, 11, 0, 28, 23, 8, 0),
955 DDS_VAL(31, 21, 10, 0, 27, 24, 7, 0),
956 DDS_VAL(31, 22, 9, 0, 26, 25, 6, 0),
957 DDS_VAL(30, 23, 8, 0, 25, 26, 5, 0),
958 DDS_VAL(29, 24, 7, 0, 23, 27, 4, 0),
959 /* Vendor recommends below for 1m cable */
960#define DDS_1M 13
961 DDS_VAL(28, 25, 6, 0, 21, 28, 3, 0),
962 DDS_VAL(27, 26, 5, 0, 19, 29, 2, 0),
963 DDS_VAL(25, 27, 4, 0, 17, 30, 1, 0)
964};
965
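/*
 * Editorial expansion of the first (3m) row, to make the packing
 * concrete: DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0) produces the bytes
 *	{ 0x3F, 0x3B, 0x9C, 0xB4, 0x39, 0x33 }
 * amplitude in bits 5:1 of bytes 0/1, main tap in bits 7:3 of
 * bytes 2/3, post/pre-cursor split across bytes 4/5.
 */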
966/*
967 * Next, values related to Receive Equalization.
968 * In comments, FDR (Full) is IB DDR, HDR (Half) is IB SDR
969 */
970/* Hardware packs an element number and register address thus: */
971#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
972#define RXEQ_VAL(elt, adr, val0, val1, val2, val3) \
973 {RXEQ_INIT_RDESC((elt), (adr)), {(val0), (val1), (val2), (val3)} }
974
975#define RXEQ_VAL_ALL(elt, adr, val) \
976 {RXEQ_INIT_RDESC((elt), (adr)), {(val), (val), (val), (val)} }
977
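/*
 * Editorial example of the rdesc packing: RXEQ_INIT_RDESC(7, 0x27)
 *	= (7 & 0xF) | (0x27 << 4) = 0x277
 * element in the low nibble, register address in the upper bits.
 */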
978#define RXEQ_SDR_DFELTH 0
979#define RXEQ_SDR_TLTH 0
980#define RXEQ_SDR_G1CNT_Z1CNT 0x11
981#define RXEQ_SDR_ZCNT 23
982
983static struct rxeq_init {
984 u16 rdesc; /* in form used in SerDesDDSRXEQ */
985 u8 rdata[4];
986} rxeq_init_vals[] = {
 987 /* Set Rcv Eq. to Preset mode */
988 RXEQ_VAL_ALL(7, 0x27, 0x10),
989 /* Set DFELTHFDR/HDR thresholds */
990 RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR */
991 RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */
 992 /* Set TLTHFDR/HDR threshold */
993 RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR */
994 RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR */
995 /* Set Preamp setting 2 (ZFR/ZCNT) */
996 RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR */
997 RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR */
998 /* Set Preamp DC gain and Setting 1 (GFR/GHR) */
999 RXEQ_VAL(7, 0x1E, 0x10, 0x10, 0x10, 0x10), /* FDR */
1000 RXEQ_VAL(7, 0x1F, 0x10, 0x10, 0x10, 0x10), /* HDR */
1001 /* Toggle RELOCK (in VCDL_CTRL0) to lock to data */
1002 RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */
1003 RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */
1004};
1005
1006/* There are 17 values from vendor, but IBC only accesses the first 16 */
1007#define DDS_ROWS (16)
1008#define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)
1009
1010static int ipath_sd_setvals(struct ipath_devdata *dd)
1011{
1012 int idx, midx;
1013 int min_idx; /* Minimum index for this portion of table */
1014 uint32_t dds_reg_map;
1015 u64 __iomem *taddr, *iaddr;
1016 uint64_t data;
1017 uint64_t sdctl;
1018
1019 taddr = dd->ipath_kregbase + KR_IBSerDesMappTable;
1020 iaddr = dd->ipath_kregbase + dd->ipath_kregs->kr_ib_ddsrxeq;
1021
1022 /*
1023 * Init the DDS section of the table.
1024 * Each "row" of the table provokes NUM_DDS_REG writes, to the
1025 * registers indicated in DDS_REG_MAP.
1026 */
1027 sdctl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
1028 sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
1029 sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
1030 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, sdctl);
1031
1032 /*
1033 * Iterate down table within loop for each register to store.
1034 */
1035 dds_reg_map = DDS_REG_MAP;
1036 for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
1037 data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
1038 writeq(data, iaddr + idx);
1039 mmiowb();
1040 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1041 dds_reg_map >>= 4;
1042 for (midx = 0; midx < DDS_ROWS; ++midx) {
1043 u64 __iomem *daddr = taddr + ((midx << 4) + idx);
1044 data = dds_init_vals[midx].reg_vals[idx];
1045 writeq(data, daddr);
1046 mmiowb();
1047 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1048 } /* End inner for (vals for this reg, each row) */
1049 } /* end outer for (regs to be stored) */
1050
1051 /*
1052 * Init the RXEQ section of the table. As explained above the table
1053 * rxeq_init_vals[], this runs in a different order, as the pattern
1054 * of register references is more complex, but there are only
1055 * four "data" values per register.
1056 */
1057 min_idx = idx; /* RXEQ indices pick up where DDS left off */
1058 taddr += 0x100; /* RXEQ data is in second half of table */
1059 /* Iterate through RXEQ register addresses */
1060 for (idx = 0; idx < RXEQ_ROWS; ++idx) {
1061 int didx; /* "destination" */
1062 int vidx;
1063
1064 /* didx is offset by min_idx to address RXEQ range of regs */
1065 didx = idx + min_idx;
1066 /* Store the next RXEQ register address */
1067 writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
1068 mmiowb();
1069 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1070 /* Iterate through RXEQ values */
1071 for (vidx = 0; vidx < 4; vidx++) {
1072 data = rxeq_init_vals[idx].rdata[vidx];
1073 writeq(data, taddr + (vidx << 6) + idx);
1074 mmiowb();
1075 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1076 }
1077 } /* end outer for (Reg-writes for RXEQ) */
1078 return 0;
1079}
1080
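/*
 * Editorial summary of the IBSerDesMappTable layout implied by the
 * loops above:
 *	DDS value for (row, reg)  -> taddr[(row << 4) + reg],  rows 0..15
 *	RXEQ value for (set, row) -> taddr[0x100 + (set << 6) + row]
 * with the register addresses themselves programmed via kr_ib_ddsrxeq.
 */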
1081#define CMUCTRL5 EPB_LOC(7, 0, 0x15)
1082#define RXHSCTRL0(chan) EPB_LOC(chan, 6, 0)
1083#define VCDL_DAC2(chan) EPB_LOC(chan, 6, 5)
1084#define VCDL_CTRL0(chan) EPB_LOC(chan, 6, 6)
1085#define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
1086#define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)
1087
1088static int ibsd_sto_noisy(struct ipath_devdata *dd, int loc, int val, int mask)
1089{
1090 int ret = -1;
1091 int sloc; /* shifted loc, for messages */
1092
1093 loc |= (1U << EPB_IB_QUAD0_CS_SHF);
1094 sloc = loc >> EPB_ADDR_SHF;
1095
1096 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, mask);
1097 if (ret < 0)
1098 ipath_dev_err(dd, "Write failed: elt %d,"
1099 " addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n",
1100 (sloc & 0xF), (sloc >> 9) & 0x3f, (sloc >> 4) & 7,
1101 val & 0xFF, mask & 0xFF);
1102 return ret;
1103}
1104
1105/*
1106 * Repeat a "store" across all channels of the IB SerDes.
1107 * Although nominally it inherits the "read value" of the last
1108 * channel it modified, the only really useful return is <0 for
1109 * failure, >= 0 for success. The parameter 'loc' is assumed to
1110 * be the location for the channel-0 copy of the register to
1111 * be modified.
1112 */
1113static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
1114 int mask)
1115{
1116 int ret = -1;
1117 int chnl;
1118
1119 if (loc & EPB_GLOBAL_WR) {
1120 /*
1121 * Our caller has assured us that we can set all four
1122 * channels at once. Trust that. If mask is not 0xFF,
1123 * we will read the _specified_ channel for our starting
1124 * value.
1125 */
1126 loc |= (1U << EPB_IB_QUAD0_CS_SHF);
1127 chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
1128 if (mask != 0xFF) {
1129 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
1130 loc & ~EPB_GLOBAL_WR, 0, 0);
1131 if (ret < 0) {
1132 int sloc = loc >> EPB_ADDR_SHF;
1133 ipath_dev_err(dd, "pre-read failed: elt %d,"
1134 " addr 0x%X, chnl %d\n", (sloc & 0xF),
1135 (sloc >> 9) & 0x3f, chnl);
1136 return ret;
1137 }
1138 val = (ret & ~mask) | (val & mask);
1139 }
1140 loc &= ~(7 << (4+EPB_ADDR_SHF));
1141 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
1142 if (ret < 0) {
1143 int sloc = loc >> EPB_ADDR_SHF;
1144 ipath_dev_err(dd, "Global WR failed: elt %d,"
1145 " addr 0x%X, val %02X\n",
1146 (sloc & 0xF), (sloc >> 9) & 0x3f, val);
1147 }
1148 return ret;
1149 }
1150 /* Clear "channel" and set CS so we can simply iterate */
1151 loc &= ~(7 << (4+EPB_ADDR_SHF));
1152 loc |= (1U << EPB_IB_QUAD0_CS_SHF);
1153 for (chnl = 0; chnl < 4; ++chnl) {
1154 int cloc;
1155 cloc = loc | (chnl << (4+EPB_ADDR_SHF));
1156 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
1157 if (ret < 0) {
1158 int sloc = loc >> EPB_ADDR_SHF;
1159 ipath_dev_err(dd, "Write failed: elt %d,"
1160 " addr 0x%X, chnl %d, val 0x%02X,"
1161 " mask 0x%02X\n",
1162 (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
1163 val & 0xFF, mask & 0xFF);
1164 break;
1165 }
1166 }
1167 return ret;
1168}
1169
1170/*
1171 * Set the Tx values normally modified by IBC in IB1.2 mode to default
1172 * values, as gotten from first row of init table.
1173 */
1174static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi)
1175{
1176 int ret;
1177 int idx, reg, data;
1178 uint32_t regmap;
1179
1180 regmap = DDS_REG_MAP;
1181 for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
1182 reg = (regmap & 0xF);
1183 regmap >>= 4;
1184 data = ddi->reg_vals[idx];
1185 /* Vendor says RMW not needed for these regs, use 0xFF mask */
1186 ret = ibsd_mod_allchnls(dd, EPB_LOC(0, 9, reg), data, 0xFF);
1187 if (ret < 0)
1188 break;
1189 }
1190 return ret;
1191}
1192
1193/*
1194 * Set the Rx values normally modified by IBC in IB1.2 mode to default
 1195 * values, as taken from the selected column of the init table.
1196 */
1197static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
1198{
1199 int ret;
1200 int ridx;
1201 int cnt = ARRAY_SIZE(rxeq_init_vals);
1202
1203 for (ridx = 0; ridx < cnt; ++ridx) {
1204 int elt, reg, val, loc;
1205 elt = rxeq_init_vals[ridx].rdesc & 0xF;
1206 reg = rxeq_init_vals[ridx].rdesc >> 4;
1207 loc = EPB_LOC(0, elt, reg);
1208 val = rxeq_init_vals[ridx].rdata[vsel];
1209 /* mask of 0xFF, because hardware does full-byte store. */
1210 ret = ibsd_mod_allchnls(dd, loc, val, 0xFF);
1211 if (ret < 0)
1212 break;
1213 }
1214 return ret;
1215}
1216
1217/*
 1218 * Set the default values (row 0) for DDR Driver De-emphasis.
 1219 * We do this initially and whenever we turn off IB-1.2.
1220 * The "default" values for Rx equalization are also stored to
1221 * SerDes registers. Formerly (and still default), we used set 2.
1222 * For experimenting with cables and link-partners, we allow changing
1223 * that via a module parameter.
1224 */
1225static unsigned ipath_rxeq_set = 2;
1226module_param_named(rxeq_default_set, ipath_rxeq_set, uint,
1227 S_IWUSR | S_IRUGO);
1228MODULE_PARM_DESC(rxeq_default_set,
1229 "Which set [0..3] of Rx Equalization values is default");
1230
1231static int ipath_internal_presets(struct ipath_devdata *dd)
1232{
1233 int ret = 0;
1234
1235 ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
1236
1237 if (ret < 0)
1238 ipath_dev_err(dd, "Failed to set default DDS values\n");
1239 ret = set_rxeq_vals(dd, ipath_rxeq_set & 3);
1240 if (ret < 0)
1241 ipath_dev_err(dd, "Failed to set default RXEQ values\n");
1242 return ret;
1243}
1244
1245int ipath_sd7220_presets(struct ipath_devdata *dd)
1246{
1247 int ret = 0;
1248
1249 if (!dd->ipath_presets_needed)
1250 return ret;
1251 dd->ipath_presets_needed = 0;
1252 /* Assert uC reset, so we don't clash with it. */
1253 ipath_ibsd_reset(dd, 1);
1254 udelay(2);
1255 ipath_sd_trimdone_monitor(dd, "link-down");
1256
1257 ret = ipath_internal_presets(dd);
 1258	return ret;
1259}
1260
1261static int ipath_sd_trimself(struct ipath_devdata *dd, int val)
1262{
1263 return ibsd_sto_noisy(dd, CMUCTRL5, val, 0xFF);
1264}
1265
1266static int ipath_sd_early(struct ipath_devdata *dd)
1267{
1268 int ret = -1; /* Default failed */
1269 int chnl;
1270
1271 for (chnl = 0; chnl < 4; ++chnl) {
1272 ret = ibsd_sto_noisy(dd, RXHSCTRL0(chnl), 0xD4, 0xFF);
1273 if (ret < 0)
1274 goto bail;
1275 }
1276 for (chnl = 0; chnl < 4; ++chnl) {
1277 ret = ibsd_sto_noisy(dd, VCDL_DAC2(chnl), 0x2D, 0xFF);
1278 if (ret < 0)
1279 goto bail;
1280 }
1281 /* more fine-tuning of what will be default */
1282 for (chnl = 0; chnl < 4; ++chnl) {
1283 ret = ibsd_sto_noisy(dd, VCDL_CTRL2(chnl), 3, 0xF);
1284 if (ret < 0)
1285 goto bail;
1286 }
1287 for (chnl = 0; chnl < 4; ++chnl) {
1288 ret = ibsd_sto_noisy(dd, START_EQ1(chnl), 0x10, 0xFF);
1289 if (ret < 0)
1290 goto bail;
1291 }
1292 for (chnl = 0; chnl < 4; ++chnl) {
1293 ret = ibsd_sto_noisy(dd, START_EQ2(chnl), 0x30, 0xFF);
1294 if (ret < 0)
1295 goto bail;
1296 }
1297bail:
1298 return ret;
1299}
1300
1301#define BACTRL(chnl) EPB_LOC(chnl, 6, 0x0E)
1302#define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
1303#define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)
1304
1305static int ipath_sd_dactrim(struct ipath_devdata *dd)
1306{
1307 int ret = -1; /* Default failed */
1308 int chnl;
1309
1310 for (chnl = 0; chnl < 4; ++chnl) {
1311 ret = ibsd_sto_noisy(dd, BACTRL(chnl), 0x40, 0xFF);
1312 if (ret < 0)
1313 goto bail;
1314 }
1315 for (chnl = 0; chnl < 4; ++chnl) {
1316 ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x04, 0xFF);
1317 if (ret < 0)
1318 goto bail;
1319 }
1320 for (chnl = 0; chnl < 4; ++chnl) {
1321 ret = ibsd_sto_noisy(dd, RXHSSTATUS(chnl), 0x04, 0xFF);
1322 if (ret < 0)
1323 goto bail;
1324 }
1325 /*
1326 * delay for max possible number of steps, with slop.
1327 * Each step is about 4usec.
1328 */
1329 udelay(415);
1330 for (chnl = 0; chnl < 4; ++chnl) {
1331 ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x00, 0xFF);
1332 if (ret < 0)
1333 goto bail;
1334 }
1335bail:
1336 return ret;
1337}
1338
1339#define RELOCK_FIRST_MS 3
1340#define RXLSPPM(chan) EPB_LOC(chan, 0, 2)
1341void ipath_toggle_rclkrls(struct ipath_devdata *dd)
1342{
1343 int loc = RXLSPPM(0) | EPB_GLOBAL_WR;
1344 int ret;
1345
1346 ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
1347 if (ret < 0)
1348 ipath_dev_err(dd, "RCLKRLS failed to clear D7\n");
1349 else {
1350 udelay(1);
1351 ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
1352 }
1353 /* And again for good measure */
1354 udelay(1);
1355 ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
1356 if (ret < 0)
1357 ipath_dev_err(dd, "RCLKRLS failed to clear D7\n");
1358 else {
1359 udelay(1);
1360 ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
1361 }
1362 /* Now reset xgxs and IBC to complete the recovery */
1363 dd->ipath_f_xgxs_reset(dd);
1364}
1365
1366/*
 1367 * Shut down the timer that polls for relock occasions, if needed.
 1368 * This is "hooked" from ipath_7220_quiet_serdes(), which is called
 1369 * just before ipath_shutdown_device() in ipath_driver.c shuts down all
 1370 * the other timers.
1371 */
1372void ipath_shutdown_relock_poll(struct ipath_devdata *dd)
1373{
1374 struct ipath_relock *irp = &dd->ipath_relock_singleton;
1375 if (atomic_read(&irp->ipath_relock_timer_active)) {
1376 del_timer_sync(&irp->ipath_relock_timer);
1377 atomic_set(&irp->ipath_relock_timer_active, 0);
1378 }
1379}
1380
1381static unsigned ipath_relock_by_timer = 1;
1382module_param_named(relock_by_timer, ipath_relock_by_timer, uint,
1383 S_IWUSR | S_IRUGO);
1384MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
1385
1386static void ipath_run_relock(unsigned long opaque)
1387{
1388 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
1389 struct ipath_relock *irp = &dd->ipath_relock_singleton;
1390 u64 val, ltstate;
1391
1392 if (!(dd->ipath_flags & IPATH_INITTED)) {
1393 /* Not yet up, just reenable the timer for later */
1394 irp->ipath_relock_interval = HZ;
1395 mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
1396 return;
1397 }
1398
1399 /*
1400 * Check link-training state for "stuck" state.
 1401 * If found, try relock and schedule another try at an
 1402 * exponentially growing delay, maxed at one second.
 1403 * If not stuck, our work is done.
1404 */
1405 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
1406 ltstate = ipath_ib_linktrstate(dd, val);
1407
1408 if (ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT
1409 && ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
1410 int timeoff;
1411 /* Not up yet. Try again, if allowed by module-param */
1412 if (ipath_relock_by_timer) {
1413 if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
1414 ipath_cdbg(VERBOSE, "Skip RELOCK in AUTONEG\n");
1415 else if (!(dd->ipath_flags & IPATH_IB_LINK_DISABLED)) {
1416 ipath_cdbg(VERBOSE, "RELOCK\n");
1417 ipath_toggle_rclkrls(dd);
1418 }
1419 }
1420 /* re-set timer for next check */
1421 timeoff = irp->ipath_relock_interval << 1;
1422 if (timeoff > HZ)
1423 timeoff = HZ;
1424 irp->ipath_relock_interval = timeoff;
1425
1426 mod_timer(&irp->ipath_relock_timer, jiffies + timeoff);
1427 } else {
1428 /* Up, so no more need to check so often */
1429 mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
1430 }
1431}
1432
1433void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup)
1434{
1435 struct ipath_relock *irp = &dd->ipath_relock_singleton;
1436
1437 if (ibup > 0) {
1438 /* we are now up, so relax timer to 1 second interval */
1439 if (atomic_read(&irp->ipath_relock_timer_active))
1440 mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
1441 } else {
1442 /* Transition to down, (re-)set timer to short interval. */
1443 int timeout;
1444 timeout = (HZ * ((ibup == -1) ? 1000 : RELOCK_FIRST_MS))/1000;
1445 if (timeout == 0)
1446 timeout = 1;
1447 /* If timer has not yet been started, do so. */
1448 if (atomic_inc_return(&irp->ipath_relock_timer_active) == 1) {
1449 init_timer(&irp->ipath_relock_timer);
1450 irp->ipath_relock_timer.function = ipath_run_relock;
1451 irp->ipath_relock_timer.data = (unsigned long) dd;
1452 irp->ipath_relock_interval = timeout;
1453 irp->ipath_relock_timer.expires = jiffies + timeout;
1454 add_timer(&irp->ipath_relock_timer);
1455 } else {
1456 irp->ipath_relock_interval = timeout;
1457 mod_timer(&irp->ipath_relock_timer, jiffies + timeout);
1458 atomic_dec(&irp->ipath_relock_timer_active);
1459 }
1460 }
1461}
1462
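/*
 * Editorial summary of the ibup argument: >0 relaxes polling to one
 * second (link up); 0 restarts at RELOCK_FIRST_MS (link just dropped);
 * -1 starts at a full second (the init path in ipath_sd7220_init()).
 * ipath_run_relock() then doubles the interval per missed relock,
 * capped at HZ.
 */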
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c b/drivers/infiniband/hw/ipath/ipath_sd7220_img.c
new file mode 100644
index 000000000000..5ef59da9270a
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_sd7220_img.c
@@ -0,0 +1,1082 @@
1/*
2 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/*
34 * This file contains the memory image from the vendor, to be copied into
35 * the IB SERDES of the IBA7220 during initialization.
36 * The file also includes the two functions which use this image.
37 */
38#include <linux/pci.h>
39#include <linux/delay.h>
40
41#include "ipath_kernel.h"
42#include "ipath_registers.h"
43#include "ipath_7220.h"
44
45static unsigned char ipath_sd7220_ib_img[] = {
46/*0000*/0x02, 0x0A, 0x29, 0x02, 0x0A, 0x87, 0xE5, 0xE6,
47 0x30, 0xE6, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F,
48/*0010*/0x00, 0xE5, 0xE2, 0x30, 0xE4, 0x04, 0x7E, 0x01,
49 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x5F, 0x60, 0x08,
50/*0020*/0x53, 0xF9, 0xF7, 0xE4, 0xF5, 0xFE, 0x80, 0x08,
51 0x7F, 0x0A, 0x12, 0x17, 0x31, 0x12, 0x0E, 0xA2,
52/*0030*/0x75, 0xFC, 0x08, 0xE4, 0xF5, 0xFD, 0xE5, 0xE7,
53 0x20, 0xE7, 0x03, 0x43, 0xF9, 0x08, 0x22, 0x00,
54/*0040*/0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x75,
55 0x51, 0x01, 0xE4, 0xF5, 0x52, 0xF5, 0x53, 0xF5,
56/*0050*/0x52, 0xF5, 0x7E, 0x7F, 0x04, 0x02, 0x04, 0x38,
57 0xC2, 0x36, 0x05, 0x52, 0xE5, 0x52, 0xD3, 0x94,
58/*0060*/0x0C, 0x40, 0x05, 0x75, 0x52, 0x01, 0xD2, 0x36,
59 0x90, 0x07, 0x0C, 0x74, 0x07, 0xF0, 0xA3, 0x74,
60/*0070*/0xFF, 0xF0, 0xE4, 0xF5, 0x0C, 0xA3, 0xF0, 0x90,
61 0x07, 0x14, 0xF0, 0xA3, 0xF0, 0x75, 0x0B, 0x20,
62/*0080*/0xF5, 0x09, 0xE4, 0xF5, 0x08, 0xE5, 0x08, 0xD3,
63 0x94, 0x30, 0x40, 0x03, 0x02, 0x04, 0x04, 0x12,
64/*0090*/0x00, 0x06, 0x15, 0x0B, 0xE5, 0x08, 0x70, 0x04,
65 0x7F, 0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5, 0x09,
66/*00A0*/0x70, 0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00,
67 0xEE, 0x5F, 0x60, 0x05, 0x12, 0x18, 0x71, 0xD2,
68/*00B0*/0x35, 0x53, 0xE1, 0xF7, 0xE5, 0x08, 0x45, 0x09,
69 0xFF, 0xE5, 0x0B, 0x25, 0xE0, 0x25, 0xE0, 0x24,
70/*00C0*/0x83, 0xF5, 0x82, 0xE4, 0x34, 0x07, 0xF5, 0x83,
71 0xEF, 0xF0, 0x85, 0xE2, 0x20, 0xE5, 0x52, 0xD3,
72/*00D0*/0x94, 0x01, 0x40, 0x0D, 0x12, 0x19, 0xF3, 0xE0,
73 0x54, 0xA0, 0x64, 0x40, 0x70, 0x03, 0x02, 0x03,
74/*00E0*/0xFB, 0x53, 0xF9, 0xF8, 0x90, 0x94, 0x70, 0xE4,
75 0xF0, 0xE0, 0xF5, 0x10, 0xAF, 0x09, 0x12, 0x1E,
76/*00F0*/0xB3, 0xAF, 0x08, 0xEF, 0x44, 0x08, 0xF5, 0x82,
77 0x75, 0x83, 0x80, 0xE0, 0xF5, 0x29, 0xEF, 0x44,
78/*0100*/0x07, 0x12, 0x1A, 0x3C, 0xF5, 0x22, 0x54, 0x40,
79 0xD3, 0x94, 0x00, 0x40, 0x1E, 0xE5, 0x29, 0x54,
80/*0110*/0xF0, 0x70, 0x21, 0x12, 0x19, 0xF3, 0xE0, 0x44,
81 0x80, 0xF0, 0xE5, 0x22, 0x54, 0x30, 0x65, 0x08,
82/*0120*/0x70, 0x09, 0x12, 0x19, 0xF3, 0xE0, 0x54, 0xBF,
83 0xF0, 0x80, 0x09, 0x12, 0x19, 0xF3, 0x74, 0x40,
84/*0130*/0xF0, 0x02, 0x03, 0xFB, 0x12, 0x1A, 0x12, 0x75,
85 0x83, 0xAE, 0x74, 0xFF, 0xF0, 0xAF, 0x08, 0x7E,
86/*0140*/0x00, 0xEF, 0x44, 0x07, 0xF5, 0x82, 0xE0, 0xFD,
87 0xE5, 0x0B, 0x25, 0xE0, 0x25, 0xE0, 0x24, 0x81,
88/*0150*/0xF5, 0x82, 0xE4, 0x34, 0x07, 0xF5, 0x83, 0xED,
89 0xF0, 0x90, 0x07, 0x0E, 0xE0, 0x04, 0xF0, 0xEF,
90/*0160*/0x44, 0x07, 0xF5, 0x82, 0x75, 0x83, 0x98, 0xE0,
91 0xF5, 0x28, 0x12, 0x1A, 0x23, 0x40, 0x0C, 0x12,
92/*0170*/0x19, 0xF3, 0xE0, 0x44, 0x01, 0x12, 0x1A, 0x32,
93 0x02, 0x03, 0xF6, 0xAF, 0x08, 0x7E, 0x00, 0x74,
94/*0180*/0x80, 0xCD, 0xEF, 0xCD, 0x8D, 0x82, 0xF5, 0x83,
95 0xE0, 0x30, 0xE0, 0x0A, 0x12, 0x19, 0xF3, 0xE0,
96/*0190*/0x44, 0x20, 0xF0, 0x02, 0x03, 0xFB, 0x12, 0x19,
97 0xF3, 0xE0, 0x54, 0xDF, 0xF0, 0xEE, 0x44, 0xAE,
98/*01A0*/0x12, 0x1A, 0x43, 0x30, 0xE4, 0x03, 0x02, 0x03,
99 0xFB, 0x74, 0x9E, 0x12, 0x1A, 0x05, 0x20, 0xE0,
100/*01B0*/0x03, 0x02, 0x03, 0xFB, 0x8F, 0x82, 0x8E, 0x83,
101 0xE0, 0x20, 0xE0, 0x03, 0x02, 0x03, 0xFB, 0x12,
102/*01C0*/0x19, 0xF3, 0xE0, 0x44, 0x10, 0xF0, 0xE5, 0xE3,
103 0x20, 0xE7, 0x08, 0xE5, 0x08, 0x12, 0x1A, 0x3A,
104/*01D0*/0x44, 0x04, 0xF0, 0xAF, 0x08, 0x7E, 0x00, 0xEF,
105 0x12, 0x1A, 0x3A, 0x20, 0xE2, 0x34, 0x12, 0x19,
106/*01E0*/0xF3, 0xE0, 0x44, 0x08, 0xF0, 0xE5, 0xE4, 0x30,
107 0xE6, 0x04, 0x7D, 0x01, 0x80, 0x02, 0x7D, 0x00,
108/*01F0*/0xE5, 0x7E, 0xC3, 0x94, 0x04, 0x50, 0x04, 0x7C,
109 0x01, 0x80, 0x02, 0x7C, 0x00, 0xEC, 0x4D, 0x60,
110/*0200*/0x05, 0xC2, 0x35, 0x02, 0x03, 0xFB, 0xEE, 0x44,
111 0xD2, 0x12, 0x1A, 0x43, 0x44, 0x40, 0xF0, 0x02,
112/*0210*/0x03, 0xFB, 0x12, 0x19, 0xF3, 0xE0, 0x54, 0xF7,
113 0xF0, 0x12, 0x1A, 0x12, 0x75, 0x83, 0xD2, 0xE0,
114/*0220*/0x54, 0xBF, 0xF0, 0x90, 0x07, 0x14, 0xE0, 0x04,
115 0xF0, 0xE5, 0x7E, 0x70, 0x03, 0x75, 0x7E, 0x01,
116/*0230*/0xAF, 0x08, 0x7E, 0x00, 0x12, 0x1A, 0x23, 0x40,
117 0x12, 0x12, 0x19, 0xF3, 0xE0, 0x44, 0x01, 0x12,
118/*0240*/0x19, 0xF2, 0xE0, 0x54, 0x02, 0x12, 0x1A, 0x32,
119 0x02, 0x03, 0xFB, 0x12, 0x19, 0xF3, 0xE0, 0x44,
120/*0250*/0x02, 0x12, 0x19, 0xF2, 0xE0, 0x54, 0xFE, 0xF0,
121 0xC2, 0x35, 0xEE, 0x44, 0x8A, 0x8F, 0x82, 0xF5,
122/*0260*/0x83, 0xE0, 0xF5, 0x17, 0x54, 0x8F, 0x44, 0x40,
123 0xF0, 0x74, 0x90, 0xFC, 0xE5, 0x08, 0x44, 0x07,
124/*0270*/0xFD, 0xF5, 0x82, 0x8C, 0x83, 0xE0, 0x54, 0x3F,
125 0x90, 0x07, 0x02, 0xF0, 0xE0, 0x54, 0xC0, 0x8D,
126/*0280*/0x82, 0x8C, 0x83, 0xF0, 0x74, 0x92, 0x12, 0x1A,
127 0x05, 0x90, 0x07, 0x03, 0x12, 0x1A, 0x19, 0x74,
128/*0290*/0x82, 0x12, 0x1A, 0x05, 0x90, 0x07, 0x04, 0x12,
129 0x1A, 0x19, 0x74, 0xB4, 0x12, 0x1A, 0x05, 0x90,
130/*02A0*/0x07, 0x05, 0x12, 0x1A, 0x19, 0x74, 0x94, 0xFE,
131 0xE5, 0x08, 0x44, 0x06, 0x12, 0x1A, 0x0A, 0xF5,
132/*02B0*/0x10, 0x30, 0xE0, 0x04, 0xD2, 0x37, 0x80, 0x02,
133 0xC2, 0x37, 0xE5, 0x10, 0x54, 0x7F, 0x8F, 0x82,
134/*02C0*/0x8E, 0x83, 0xF0, 0x30, 0x44, 0x30, 0x12, 0x1A,
135 0x03, 0x54, 0x80, 0xD3, 0x94, 0x00, 0x40, 0x04,
136/*02D0*/0xD2, 0x39, 0x80, 0x02, 0xC2, 0x39, 0x8F, 0x82,
137 0x8E, 0x83, 0xE0, 0x44, 0x80, 0xF0, 0x12, 0x1A,
138/*02E0*/0x03, 0x54, 0x40, 0xD3, 0x94, 0x00, 0x40, 0x04,
139 0xD2, 0x3A, 0x80, 0x02, 0xC2, 0x3A, 0x8F, 0x82,
140/*02F0*/0x8E, 0x83, 0xE0, 0x44, 0x40, 0xF0, 0x74, 0x92,
141 0xFE, 0xE5, 0x08, 0x44, 0x06, 0x12, 0x1A, 0x0A,
142/*0300*/0x30, 0xE7, 0x04, 0xD2, 0x38, 0x80, 0x02, 0xC2,
143 0x38, 0x8F, 0x82, 0x8E, 0x83, 0xE0, 0x54, 0x7F,
144/*0310*/0xF0, 0x12, 0x1E, 0x46, 0xE4, 0xF5, 0x0A, 0x20,
145 0x03, 0x02, 0x80, 0x03, 0x30, 0x43, 0x03, 0x12,
146/*0320*/0x19, 0x95, 0x20, 0x02, 0x02, 0x80, 0x03, 0x30,
147 0x42, 0x03, 0x12, 0x0C, 0x8F, 0x30, 0x30, 0x06,
148/*0330*/0x12, 0x19, 0x95, 0x12, 0x0C, 0x8F, 0x12, 0x0D,
149 0x47, 0x12, 0x19, 0xF3, 0xE0, 0x54, 0xFB, 0xF0,
150/*0340*/0xE5, 0x0A, 0xC3, 0x94, 0x01, 0x40, 0x46, 0x43,
151 0xE1, 0x08, 0x12, 0x19, 0xF3, 0xE0, 0x44, 0x04,
152/*0350*/0xF0, 0xE5, 0xE4, 0x20, 0xE7, 0x2A, 0x12, 0x1A,
153 0x12, 0x75, 0x83, 0xD2, 0xE0, 0x54, 0x08, 0xD3,
154/*0360*/0x94, 0x00, 0x40, 0x04, 0x7F, 0x01, 0x80, 0x02,
155 0x7F, 0x00, 0xE5, 0x0A, 0xC3, 0x94, 0x01, 0x40,
156/*0370*/0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEF,
157 0x5E, 0x60, 0x05, 0x12, 0x1D, 0xD7, 0x80, 0x17,
158/*0380*/0x12, 0x1A, 0x12, 0x75, 0x83, 0xD2, 0xE0, 0x44,
159 0x08, 0xF0, 0x02, 0x03, 0xFB, 0x12, 0x1A, 0x12,
160/*0390*/0x75, 0x83, 0xD2, 0xE0, 0x54, 0xF7, 0xF0, 0x12,
161 0x1E, 0x46, 0x7F, 0x08, 0x12, 0x17, 0x31, 0x74,
162/*03A0*/0x8E, 0xFE, 0x12, 0x1A, 0x12, 0x8E, 0x83, 0xE0,
163 0xF5, 0x10, 0x54, 0xFE, 0xF0, 0xE5, 0x10, 0x44,
164/*03B0*/0x01, 0xFF, 0xE5, 0x08, 0xFD, 0xED, 0x44, 0x07,
165 0xF5, 0x82, 0xEF, 0xF0, 0xE5, 0x10, 0x54, 0xFE,
166/*03C0*/0xFF, 0xED, 0x44, 0x07, 0xF5, 0x82, 0xEF, 0x12,
167 0x1A, 0x11, 0x75, 0x83, 0x86, 0xE0, 0x44, 0x10,
168/*03D0*/0x12, 0x1A, 0x11, 0xE0, 0x44, 0x10, 0xF0, 0x12,
169 0x19, 0xF3, 0xE0, 0x54, 0xFD, 0x44, 0x01, 0xFF,
170/*03E0*/0x12, 0x19, 0xF3, 0xEF, 0x12, 0x1A, 0x32, 0x30,
171 0x32, 0x0C, 0xE5, 0x08, 0x44, 0x08, 0xF5, 0x82,
172/*03F0*/0x75, 0x83, 0x82, 0x74, 0x05, 0xF0, 0xAF, 0x0B,
173 0x12, 0x18, 0xD7, 0x74, 0x10, 0x25, 0x08, 0xF5,
174/*0400*/0x08, 0x02, 0x00, 0x85, 0x05, 0x09, 0xE5, 0x09,
175 0xD3, 0x94, 0x07, 0x50, 0x03, 0x02, 0x00, 0x82,
176/*0410*/0xE5, 0x7E, 0xD3, 0x94, 0x00, 0x40, 0x04, 0x7F,
177 0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5, 0x7E, 0xC3,
178/*0420*/0x94, 0xFA, 0x50, 0x04, 0x7E, 0x01, 0x80, 0x02,
179 0x7E, 0x00, 0xEE, 0x5F, 0x60, 0x02, 0x05, 0x7E,
180/*0430*/0x30, 0x35, 0x0B, 0x43, 0xE1, 0x01, 0x7F, 0x09,
181 0x12, 0x17, 0x31, 0x02, 0x00, 0x58, 0x53, 0xE1,
182/*0440*/0xFE, 0x02, 0x00, 0x58, 0x8E, 0x6A, 0x8F, 0x6B,
183 0x8C, 0x6C, 0x8D, 0x6D, 0x75, 0x6E, 0x01, 0x75,
184/*0450*/0x6F, 0x01, 0x75, 0x70, 0x01, 0xE4, 0xF5, 0x73,
185 0xF5, 0x74, 0xF5, 0x75, 0x90, 0x07, 0x2F, 0xF0,
186/*0460*/0xF5, 0x3C, 0xF5, 0x3E, 0xF5, 0x46, 0xF5, 0x47,
187 0xF5, 0x3D, 0xF5, 0x3F, 0xF5, 0x6F, 0xE5, 0x6F,
188/*0470*/0x70, 0x0F, 0xE5, 0x6B, 0x45, 0x6A, 0x12, 0x07,
189 0x2A, 0x75, 0x83, 0x80, 0x74, 0x3A, 0xF0, 0x80,
190/*0480*/0x09, 0x12, 0x07, 0x2A, 0x75, 0x83, 0x80, 0x74,
191 0x1A, 0xF0, 0xE4, 0xF5, 0x6E, 0xC3, 0x74, 0x3F,
192/*0490*/0x95, 0x6E, 0xFF, 0x12, 0x08, 0x65, 0x75, 0x83,
193 0x82, 0xEF, 0xF0, 0x12, 0x1A, 0x4D, 0x12, 0x08,
194/*04A0*/0xC6, 0xE5, 0x33, 0xF0, 0x12, 0x08, 0xFA, 0x12,
195 0x08, 0xB1, 0x40, 0xE1, 0xE5, 0x6F, 0x70, 0x0B,
196/*04B0*/0x12, 0x07, 0x2A, 0x75, 0x83, 0x80, 0x74, 0x36,
197 0xF0, 0x80, 0x09, 0x12, 0x07, 0x2A, 0x75, 0x83,
198/*04C0*/0x80, 0x74, 0x16, 0xF0, 0x75, 0x6E, 0x01, 0x12,
199 0x07, 0x2A, 0x75, 0x83, 0xB4, 0xE5, 0x6E, 0xF0,
200/*04D0*/0x12, 0x1A, 0x4D, 0x74, 0x3F, 0x25, 0x6E, 0xF5,
201 0x82, 0xE4, 0x34, 0x00, 0xF5, 0x83, 0xE5, 0x33,
202/*04E0*/0xF0, 0x74, 0xBF, 0x25, 0x6E, 0xF5, 0x82, 0xE4,
203 0x34, 0x00, 0x12, 0x08, 0xB1, 0x40, 0xD8, 0xE4,
204/*04F0*/0xF5, 0x70, 0xF5, 0x46, 0xF5, 0x47, 0xF5, 0x6E,
205 0x12, 0x08, 0xFA, 0xF5, 0x83, 0xE0, 0xFE, 0x12,
206/*0500*/0x08, 0xC6, 0xE0, 0x7C, 0x00, 0x24, 0x00, 0xFF,
207 0xEC, 0x3E, 0xFE, 0xAD, 0x3B, 0xD3, 0xEF, 0x9D,
208/*0510*/0xEE, 0x9C, 0x50, 0x04, 0x7B, 0x01, 0x80, 0x02,
209 0x7B, 0x00, 0xE5, 0x70, 0x70, 0x04, 0x7A, 0x01,
210/*0520*/0x80, 0x02, 0x7A, 0x00, 0xEB, 0x5A, 0x60, 0x06,
211 0x85, 0x6E, 0x46, 0x75, 0x70, 0x01, 0xD3, 0xEF,
212/*0530*/0x9D, 0xEE, 0x9C, 0x50, 0x04, 0x7F, 0x01, 0x80,
213 0x02, 0x7F, 0x00, 0xE5, 0x70, 0xB4, 0x01, 0x04,
214/*0540*/0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEF, 0x5E,
215 0x60, 0x03, 0x85, 0x6E, 0x47, 0x05, 0x6E, 0xE5,
216/*0550*/0x6E, 0x64, 0x7F, 0x70, 0xA3, 0xE5, 0x46, 0x60,
217 0x05, 0xE5, 0x47, 0xB4, 0x7E, 0x03, 0x85, 0x46,
218/*0560*/0x47, 0xE5, 0x6F, 0x70, 0x08, 0x85, 0x46, 0x76,
219 0x85, 0x47, 0x77, 0x80, 0x0E, 0xC3, 0x74, 0x7F,
220/*0570*/0x95, 0x46, 0xF5, 0x78, 0xC3, 0x74, 0x7F, 0x95,
221 0x47, 0xF5, 0x79, 0xE5, 0x6F, 0x70, 0x37, 0xE5,
222/*0580*/0x46, 0x65, 0x47, 0x70, 0x0C, 0x75, 0x73, 0x01,
223 0x75, 0x74, 0x01, 0xF5, 0x3C, 0xF5, 0x3D, 0x80,
224/*0590*/0x35, 0xE4, 0xF5, 0x4E, 0xC3, 0xE5, 0x47, 0x95,
225 0x46, 0xF5, 0x3C, 0xC3, 0x13, 0xF5, 0x71, 0x25,
226/*05A0*/0x46, 0xF5, 0x72, 0xC3, 0x94, 0x3F, 0x40, 0x05,
227 0xE4, 0xF5, 0x3D, 0x80, 0x40, 0xC3, 0x74, 0x3F,
228/*05B0*/0x95, 0x72, 0xF5, 0x3D, 0x80, 0x37, 0xE5, 0x46,
229 0x65, 0x47, 0x70, 0x0F, 0x75, 0x73, 0x01, 0x75,
230/*05C0*/0x75, 0x01, 0xF5, 0x3E, 0xF5, 0x3F, 0x75, 0x4E,
231 0x01, 0x80, 0x22, 0xE4, 0xF5, 0x4E, 0xC3, 0xE5,
232/*05D0*/0x47, 0x95, 0x46, 0xF5, 0x3E, 0xC3, 0x13, 0xF5,
233 0x71, 0x25, 0x46, 0xF5, 0x72, 0xD3, 0x94, 0x3F,
234/*05E0*/0x50, 0x05, 0xE4, 0xF5, 0x3F, 0x80, 0x06, 0xE5,
235 0x72, 0x24, 0xC1, 0xF5, 0x3F, 0x05, 0x6F, 0xE5,
236/*05F0*/0x6F, 0xC3, 0x94, 0x02, 0x50, 0x03, 0x02, 0x04,
237 0x6E, 0xE5, 0x6D, 0x45, 0x6C, 0x70, 0x02, 0x80,
238/*0600*/0x04, 0xE5, 0x74, 0x45, 0x75, 0x90, 0x07, 0x2F,
239 0xF0, 0x7F, 0x01, 0xE5, 0x3E, 0x60, 0x04, 0xE5,
240/*0610*/0x3C, 0x70, 0x14, 0xE4, 0xF5, 0x3C, 0xF5, 0x3D,
241 0xF5, 0x3E, 0xF5, 0x3F, 0x12, 0x08, 0xD2, 0x70,
242/*0620*/0x04, 0xF0, 0x02, 0x06, 0xA4, 0x80, 0x7A, 0xE5,
243 0x3C, 0xC3, 0x95, 0x3E, 0x40, 0x07, 0xE5, 0x3C,
244/*0630*/0x95, 0x3E, 0xFF, 0x80, 0x06, 0xC3, 0xE5, 0x3E,
245 0x95, 0x3C, 0xFF, 0xE5, 0x76, 0xD3, 0x95, 0x79,
246/*0640*/0x40, 0x05, 0x85, 0x76, 0x7A, 0x80, 0x03, 0x85,
247 0x79, 0x7A, 0xE5, 0x77, 0xC3, 0x95, 0x78, 0x50,
248/*0650*/0x05, 0x85, 0x77, 0x7B, 0x80, 0x03, 0x85, 0x78,
249 0x7B, 0xE5, 0x7B, 0xD3, 0x95, 0x7A, 0x40, 0x30,
250/*0660*/0xE5, 0x7B, 0x95, 0x7A, 0xF5, 0x3C, 0xF5, 0x3E,
251 0xC3, 0xE5, 0x7B, 0x95, 0x7A, 0x90, 0x07, 0x19,
252/*0670*/0xF0, 0xE5, 0x3C, 0xC3, 0x13, 0xF5, 0x71, 0x25,
253 0x7A, 0xF5, 0x72, 0xC3, 0x94, 0x3F, 0x40, 0x05,
254/*0680*/0xE4, 0xF5, 0x3D, 0x80, 0x1F, 0xC3, 0x74, 0x3F,
255 0x95, 0x72, 0xF5, 0x3D, 0xF5, 0x3F, 0x80, 0x14,
256/*0690*/0xE4, 0xF5, 0x3C, 0xF5, 0x3E, 0x90, 0x07, 0x19,
257 0xF0, 0x12, 0x08, 0xD2, 0x70, 0x03, 0xF0, 0x80,
258/*06A0*/0x03, 0x74, 0x01, 0xF0, 0x12, 0x08, 0x65, 0x75,
259 0x83, 0xD0, 0xE0, 0x54, 0x0F, 0xFE, 0xAD, 0x3C,
260/*06B0*/0x70, 0x02, 0x7E, 0x07, 0xBE, 0x0F, 0x02, 0x7E,
261 0x80, 0xEE, 0xFB, 0xEF, 0xD3, 0x9B, 0x74, 0x80,
262/*06C0*/0xF8, 0x98, 0x40, 0x1F, 0xE4, 0xF5, 0x3C, 0xF5,
263 0x3E, 0x12, 0x08, 0xD2, 0x70, 0x03, 0xF0, 0x80,
264/*06D0*/0x12, 0x74, 0x01, 0xF0, 0xE5, 0x08, 0xFB, 0xEB,
265 0x44, 0x07, 0xF5, 0x82, 0x75, 0x83, 0xD2, 0xE0,
266/*06E0*/0x44, 0x10, 0xF0, 0xE5, 0x08, 0xFB, 0xEB, 0x44,
267 0x09, 0xF5, 0x82, 0x75, 0x83, 0x9E, 0xED, 0xF0,
268/*06F0*/0xEB, 0x44, 0x07, 0xF5, 0x82, 0x75, 0x83, 0xCA,
269 0xED, 0xF0, 0x12, 0x08, 0x65, 0x75, 0x83, 0xCC,
270/*0700*/0xEF, 0xF0, 0x22, 0xE5, 0x08, 0x44, 0x07, 0xF5,
271 0x82, 0x75, 0x83, 0xBC, 0xE0, 0x54, 0xF0, 0xF0,
272/*0710*/0xE5, 0x08, 0x44, 0x07, 0xF5, 0x82, 0x75, 0x83,
273 0xBE, 0xE0, 0x54, 0xF0, 0xF0, 0xE5, 0x08, 0x44,
274/*0720*/0x07, 0xF5, 0x82, 0x75, 0x83, 0xC0, 0xE0, 0x54,
275 0xF0, 0xF0, 0xE5, 0x08, 0x44, 0x07, 0xF5, 0x82,
276/*0730*/0x22, 0xF0, 0x90, 0x07, 0x28, 0xE0, 0xFE, 0xA3,
277 0xE0, 0xF5, 0x82, 0x8E, 0x83, 0x22, 0x85, 0x42,
278/*0740*/0x42, 0x85, 0x41, 0x41, 0x85, 0x40, 0x40, 0x74,
279 0xC0, 0x2F, 0xF5, 0x82, 0x74, 0x02, 0x3E, 0xF5,
280/*0750*/0x83, 0xE5, 0x42, 0xF0, 0x74, 0xE0, 0x2F, 0xF5,
281 0x82, 0x74, 0x02, 0x3E, 0xF5, 0x83, 0x22, 0xE5,
282/*0760*/0x42, 0x29, 0xFD, 0xE4, 0x33, 0xFC, 0xE5, 0x3C,
283 0xC3, 0x9D, 0xEC, 0x64, 0x80, 0xF8, 0x74, 0x80,
284/*0770*/0x98, 0x22, 0xF5, 0x83, 0xE0, 0x90, 0x07, 0x22,
285 0x54, 0x1F, 0xFD, 0xE0, 0xFA, 0xA3, 0xE0, 0xF5,
286/*0780*/0x82, 0x8A, 0x83, 0xED, 0xF0, 0x22, 0x90, 0x07,
287 0x22, 0xE0, 0xFC, 0xA3, 0xE0, 0xF5, 0x82, 0x8C,
288/*0790*/0x83, 0x22, 0x90, 0x07, 0x24, 0xFF, 0xED, 0x44,
289 0x07, 0xCF, 0xF0, 0xA3, 0xEF, 0xF0, 0x22, 0x85,
290/*07A0*/0x38, 0x38, 0x85, 0x39, 0x39, 0x85, 0x3A, 0x3A,
291 0x74, 0xC0, 0x2F, 0xF5, 0x82, 0x74, 0x02, 0x3E,
292/*07B0*/0xF5, 0x83, 0x22, 0x90, 0x07, 0x26, 0xFF, 0xED,
293 0x44, 0x07, 0xCF, 0xF0, 0xA3, 0xEF, 0xF0, 0x22,
294/*07C0*/0xF0, 0x74, 0xA0, 0x2F, 0xF5, 0x82, 0x74, 0x02,
295 0x3E, 0xF5, 0x83, 0x22, 0x74, 0xC0, 0x25, 0x11,
296/*07D0*/0xF5, 0x82, 0xE4, 0x34, 0x01, 0xF5, 0x83, 0x22,
297 0x74, 0x00, 0x25, 0x11, 0xF5, 0x82, 0xE4, 0x34,
298/*07E0*/0x02, 0xF5, 0x83, 0x22, 0x74, 0x60, 0x25, 0x11,
299 0xF5, 0x82, 0xE4, 0x34, 0x03, 0xF5, 0x83, 0x22,
300/*07F0*/0x74, 0x80, 0x25, 0x11, 0xF5, 0x82, 0xE4, 0x34,
301 0x03, 0xF5, 0x83, 0x22, 0x74, 0xE0, 0x25, 0x11,
302/*0800*/0xF5, 0x82, 0xE4, 0x34, 0x03, 0xF5, 0x83, 0x22,
303 0x74, 0x40, 0x25, 0x11, 0xF5, 0x82, 0xE4, 0x34,
304/*0810*/0x06, 0xF5, 0x83, 0x22, 0x74, 0x80, 0x2F, 0xF5,
305 0x82, 0x74, 0x02, 0x3E, 0xF5, 0x83, 0x22, 0xAF,
306/*0820*/0x08, 0x7E, 0x00, 0xEF, 0x44, 0x07, 0xF5, 0x82,
307 0x22, 0xF5, 0x83, 0xE5, 0x82, 0x44, 0x07, 0xF5,
308/*0830*/0x82, 0xE5, 0x40, 0xF0, 0x22, 0x74, 0x40, 0x25,
309 0x11, 0xF5, 0x82, 0xE4, 0x34, 0x02, 0xF5, 0x83,
310/*0840*/0x22, 0x74, 0xC0, 0x25, 0x11, 0xF5, 0x82, 0xE4,
311 0x34, 0x03, 0xF5, 0x83, 0x22, 0x74, 0x00, 0x25,
312/*0850*/0x11, 0xF5, 0x82, 0xE4, 0x34, 0x06, 0xF5, 0x83,
313 0x22, 0x74, 0x20, 0x25, 0x11, 0xF5, 0x82, 0xE4,
314/*0860*/0x34, 0x06, 0xF5, 0x83, 0x22, 0xE5, 0x08, 0xFD,
315 0xED, 0x44, 0x07, 0xF5, 0x82, 0x22, 0xE5, 0x41,
316/*0870*/0xF0, 0xE5, 0x65, 0x64, 0x01, 0x45, 0x64, 0x22,
317 0x7E, 0x00, 0xFB, 0x7A, 0x00, 0xFD, 0x7C, 0x00,
318/*0880*/0x22, 0x74, 0x20, 0x25, 0x11, 0xF5, 0x82, 0xE4,
319 0x34, 0x02, 0x22, 0x74, 0xA0, 0x25, 0x11, 0xF5,
320/*0890*/0x82, 0xE4, 0x34, 0x03, 0x22, 0x85, 0x3E, 0x42,
321 0x85, 0x3F, 0x41, 0x8F, 0x40, 0x22, 0x85, 0x3C,
322/*08A0*/0x42, 0x85, 0x3D, 0x41, 0x8F, 0x40, 0x22, 0x75,
323 0x45, 0x3F, 0x90, 0x07, 0x20, 0xE4, 0xF0, 0xA3,
324/*08B0*/0x22, 0xF5, 0x83, 0xE5, 0x32, 0xF0, 0x05, 0x6E,
325 0xE5, 0x6E, 0xC3, 0x94, 0x40, 0x22, 0xF0, 0xE5,
326/*08C0*/0x08, 0x44, 0x06, 0xF5, 0x82, 0x22, 0x74, 0x00,
327 0x25, 0x6E, 0xF5, 0x82, 0xE4, 0x34, 0x00, 0xF5,
328/*08D0*/0x83, 0x22, 0xE5, 0x6D, 0x45, 0x6C, 0x90, 0x07,
329 0x2F, 0x22, 0xE4, 0xF9, 0xE5, 0x3C, 0xD3, 0x95,
330/*08E0*/0x3E, 0x22, 0x74, 0x80, 0x2E, 0xF5, 0x82, 0xE4,
331 0x34, 0x02, 0xF5, 0x83, 0xE0, 0x22, 0x74, 0xA0,
332/*08F0*/0x2E, 0xF5, 0x82, 0xE4, 0x34, 0x02, 0xF5, 0x83,
333 0xE0, 0x22, 0x74, 0x80, 0x25, 0x6E, 0xF5, 0x82,
334/*0900*/0xE4, 0x34, 0x00, 0x22, 0x25, 0x42, 0xFD, 0xE4,
335 0x33, 0xFC, 0x22, 0x85, 0x42, 0x42, 0x85, 0x41,
336/*0910*/0x41, 0x85, 0x40, 0x40, 0x22, 0xED, 0x4C, 0x60,
337 0x03, 0x02, 0x09, 0xE5, 0xEF, 0x4E, 0x70, 0x37,
338/*0920*/0x90, 0x07, 0x26, 0x12, 0x07, 0x89, 0xE0, 0xFD,
339 0x12, 0x07, 0xCC, 0xED, 0xF0, 0x90, 0x07, 0x28,
340/*0930*/0x12, 0x07, 0x89, 0xE0, 0xFD, 0x12, 0x07, 0xD8,
341 0xED, 0xF0, 0x12, 0x07, 0x86, 0xE0, 0x54, 0x1F,
342/*0940*/0xFD, 0x12, 0x08, 0x81, 0xF5, 0x83, 0xED, 0xF0,
343 0x90, 0x07, 0x24, 0x12, 0x07, 0x89, 0xE0, 0x54,
344/*0950*/0x1F, 0xFD, 0x12, 0x08, 0x35, 0xED, 0xF0, 0xEF,
345 0x64, 0x04, 0x4E, 0x70, 0x37, 0x90, 0x07, 0x26,
346/*0960*/0x12, 0x07, 0x89, 0xE0, 0xFD, 0x12, 0x07, 0xE4,
347 0xED, 0xF0, 0x90, 0x07, 0x28, 0x12, 0x07, 0x89,
348/*0970*/0xE0, 0xFD, 0x12, 0x07, 0xF0, 0xED, 0xF0, 0x12,
349 0x07, 0x86, 0xE0, 0x54, 0x1F, 0xFD, 0x12, 0x08,
350/*0980*/0x8B, 0xF5, 0x83, 0xED, 0xF0, 0x90, 0x07, 0x24,
351 0x12, 0x07, 0x89, 0xE0, 0x54, 0x1F, 0xFD, 0x12,
352/*0990*/0x08, 0x41, 0xED, 0xF0, 0xEF, 0x64, 0x01, 0x4E,
353 0x70, 0x04, 0x7D, 0x01, 0x80, 0x02, 0x7D, 0x00,
354/*09A0*/0xEF, 0x64, 0x02, 0x4E, 0x70, 0x04, 0x7F, 0x01,
355 0x80, 0x02, 0x7F, 0x00, 0xEF, 0x4D, 0x60, 0x78,
356/*09B0*/0x90, 0x07, 0x26, 0x12, 0x07, 0x35, 0xE0, 0xFF,
357 0x12, 0x07, 0xFC, 0xEF, 0x12, 0x07, 0x31, 0xE0,
358/*09C0*/0xFF, 0x12, 0x08, 0x08, 0xEF, 0xF0, 0x90, 0x07,
359 0x22, 0x12, 0x07, 0x35, 0xE0, 0x54, 0x1F, 0xFF,
360/*09D0*/0x12, 0x08, 0x4D, 0xEF, 0xF0, 0x90, 0x07, 0x24,
361 0x12, 0x07, 0x35, 0xE0, 0x54, 0x1F, 0xFF, 0x12,
362/*09E0*/0x08, 0x59, 0xEF, 0xF0, 0x22, 0x12, 0x07, 0xCC,
363 0xE4, 0xF0, 0x12, 0x07, 0xD8, 0xE4, 0xF0, 0x12,
364/*09F0*/0x08, 0x81, 0xF5, 0x83, 0xE4, 0xF0, 0x12, 0x08,
365 0x35, 0x74, 0x14, 0xF0, 0x12, 0x07, 0xE4, 0xE4,
366/*0A00*/0xF0, 0x12, 0x07, 0xF0, 0xE4, 0xF0, 0x12, 0x08,
367 0x8B, 0xF5, 0x83, 0xE4, 0xF0, 0x12, 0x08, 0x41,
368/*0A10*/0x74, 0x14, 0xF0, 0x12, 0x07, 0xFC, 0xE4, 0xF0,
369 0x12, 0x08, 0x08, 0xE4, 0xF0, 0x12, 0x08, 0x4D,
370/*0A20*/0xE4, 0xF0, 0x12, 0x08, 0x59, 0x74, 0x14, 0xF0,
371 0x22, 0x53, 0xF9, 0xF7, 0x75, 0xFC, 0x10, 0xE4,
372/*0A30*/0xF5, 0xFD, 0x75, 0xFE, 0x30, 0xF5, 0xFF, 0xE5,
373 0xE7, 0x20, 0xE7, 0x03, 0x43, 0xF9, 0x08, 0xE5,
374/*0A40*/0xE6, 0x20, 0xE7, 0x0B, 0x78, 0xFF, 0xE4, 0xF6,
375 0xD8, 0xFD, 0x53, 0xE6, 0xFE, 0x80, 0x09, 0x78,
376/*0A50*/0x08, 0xE4, 0xF6, 0xD8, 0xFD, 0x53, 0xE6, 0xFE,
377 0x75, 0x81, 0x80, 0xE4, 0xF5, 0xA8, 0xD2, 0xA8,
378/*0A60*/0xC2, 0xA9, 0xD2, 0xAF, 0xE5, 0xE2, 0x20, 0xE5,
379 0x05, 0x20, 0xE6, 0x02, 0x80, 0x03, 0x43, 0xE1,
380/*0A70*/0x02, 0xE5, 0xE2, 0x20, 0xE0, 0x0E, 0x90, 0x00,
381 0x00, 0x7F, 0x00, 0x7E, 0x08, 0xE4, 0xF0, 0xA3,
382/*0A80*/0xDF, 0xFC, 0xDE, 0xFA, 0x02, 0x0A, 0xDB, 0x43,
383 0xFA, 0x01, 0xC0, 0xE0, 0xC0, 0xF0, 0xC0, 0x83,
384/*0A90*/0xC0, 0x82, 0xC0, 0xD0, 0x12, 0x1C, 0xE7, 0xD0,
385 0xD0, 0xD0, 0x82, 0xD0, 0x83, 0xD0, 0xF0, 0xD0,
386/*0AA0*/0xE0, 0x53, 0xFA, 0xFE, 0x32, 0x02, 0x1B, 0x55,
387 0xE4, 0x93, 0xA3, 0xF8, 0xE4, 0x93, 0xA3, 0xF6,
388/*0AB0*/0x08, 0xDF, 0xF9, 0x80, 0x29, 0xE4, 0x93, 0xA3,
389 0xF8, 0x54, 0x07, 0x24, 0x0C, 0xC8, 0xC3, 0x33,
390/*0AC0*/0xC4, 0x54, 0x0F, 0x44, 0x20, 0xC8, 0x83, 0x40,
391 0x04, 0xF4, 0x56, 0x80, 0x01, 0x46, 0xF6, 0xDF,
392/*0AD0*/0xE4, 0x80, 0x0B, 0x01, 0x02, 0x04, 0x08, 0x10,
393 0x20, 0x40, 0x80, 0x90, 0x00, 0x3F, 0xE4, 0x7E,
394/*0AE0*/0x01, 0x93, 0x60, 0xC1, 0xA3, 0xFF, 0x54, 0x3F,
395 0x30, 0xE5, 0x09, 0x54, 0x1F, 0xFE, 0xE4, 0x93,
396/*0AF0*/0xA3, 0x60, 0x01, 0x0E, 0xCF, 0x54, 0xC0, 0x25,
397 0xE0, 0x60, 0xAD, 0x40, 0xB8, 0x80, 0xFE, 0x8C,
398/*0B00*/0x64, 0x8D, 0x65, 0x8A, 0x66, 0x8B, 0x67, 0xE4,
399 0xF5, 0x69, 0xEF, 0x4E, 0x70, 0x03, 0x02, 0x1D,
400/*0B10*/0x55, 0xE4, 0xF5, 0x68, 0xE5, 0x67, 0x45, 0x66,
401 0x70, 0x32, 0x12, 0x07, 0x2A, 0x75, 0x83, 0x90,
402/*0B20*/0xE4, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC2, 0xE4,
403 0x12, 0x07, 0x29, 0x75, 0x83, 0xC4, 0xE4, 0x12,
404/*0B30*/0x08, 0x70, 0x70, 0x29, 0x12, 0x07, 0x2A, 0x75,
405 0x83, 0x92, 0xE4, 0x12, 0x07, 0x29, 0x75, 0x83,
406/*0B40*/0xC6, 0xE4, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC8,
407 0xE4, 0xF0, 0x80, 0x11, 0x90, 0x07, 0x26, 0x12,
408/*0B50*/0x07, 0x35, 0xE4, 0x12, 0x08, 0x70, 0x70, 0x05,
409 0x12, 0x07, 0x32, 0xE4, 0xF0, 0x12, 0x1D, 0x55,
410/*0B60*/0x12, 0x1E, 0xBF, 0xE5, 0x67, 0x45, 0x66, 0x70,
411 0x33, 0x12, 0x07, 0x2A, 0x75, 0x83, 0x90, 0xE5,
412/*0B70*/0x41, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC2, 0xE5,
413 0x41, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC4, 0x12,
414/*0B80*/0x08, 0x6E, 0x70, 0x29, 0x12, 0x07, 0x2A, 0x75,
415 0x83, 0x92, 0xE5, 0x40, 0x12, 0x07, 0x29, 0x75,
416/*0B90*/0x83, 0xC6, 0xE5, 0x40, 0x12, 0x07, 0x29, 0x75,
417 0x83, 0xC8, 0x80, 0x0E, 0x90, 0x07, 0x26, 0x12,
418/*0BA0*/0x07, 0x35, 0x12, 0x08, 0x6E, 0x70, 0x06, 0x12,
419 0x07, 0x32, 0xE5, 0x40, 0xF0, 0xAF, 0x69, 0x7E,
420/*0BB0*/0x00, 0xAD, 0x67, 0xAC, 0x66, 0x12, 0x04, 0x44,
421 0x12, 0x07, 0x2A, 0x75, 0x83, 0xCA, 0xE0, 0xD3,
422/*0BC0*/0x94, 0x00, 0x50, 0x0C, 0x05, 0x68, 0xE5, 0x68,
423 0xC3, 0x94, 0x05, 0x50, 0x03, 0x02, 0x0B, 0x14,
424/*0BD0*/0x22, 0x8C, 0x60, 0x8D, 0x61, 0x12, 0x08, 0xDA,
425 0x74, 0x20, 0x40, 0x0D, 0x2F, 0xF5, 0x82, 0x74,
426/*0BE0*/0x03, 0x3E, 0xF5, 0x83, 0xE5, 0x3E, 0xF0, 0x80,
427 0x0B, 0x2F, 0xF5, 0x82, 0x74, 0x03, 0x3E, 0xF5,
428/*0BF0*/0x83, 0xE5, 0x3C, 0xF0, 0xE5, 0x3C, 0xD3, 0x95,
429 0x3E, 0x40, 0x3C, 0xE5, 0x61, 0x45, 0x60, 0x70,
430/*0C00*/0x10, 0xE9, 0x12, 0x09, 0x04, 0xE5, 0x3E, 0x12,
431 0x07, 0x68, 0x40, 0x3B, 0x12, 0x08, 0x95, 0x80,
432/*0C10*/0x18, 0xE5, 0x3E, 0xC3, 0x95, 0x38, 0x40, 0x1D,
433 0x85, 0x3E, 0x38, 0xE5, 0x3E, 0x60, 0x05, 0x85,
434/*0C20*/0x3F, 0x39, 0x80, 0x03, 0x85, 0x39, 0x39, 0x8F,
435 0x3A, 0x12, 0x08, 0x14, 0xE5, 0x3E, 0x12, 0x07,
436/*0C30*/0xC0, 0xE5, 0x3F, 0xF0, 0x22, 0x80, 0x43, 0xE5,
437 0x61, 0x45, 0x60, 0x70, 0x19, 0x12, 0x07, 0x5F,
438/*0C40*/0x40, 0x05, 0x12, 0x08, 0x9E, 0x80, 0x27, 0x12,
439 0x09, 0x0B, 0x12, 0x08, 0x14, 0xE5, 0x42, 0x12,
440/*0C50*/0x07, 0xC0, 0xE5, 0x41, 0xF0, 0x22, 0xE5, 0x3C,
441 0xC3, 0x95, 0x38, 0x40, 0x1D, 0x85, 0x3C, 0x38,
442/*0C60*/0xE5, 0x3C, 0x60, 0x05, 0x85, 0x3D, 0x39, 0x80,
443 0x03, 0x85, 0x39, 0x39, 0x8F, 0x3A, 0x12, 0x08,
444/*0C70*/0x14, 0xE5, 0x3C, 0x12, 0x07, 0xC0, 0xE5, 0x3D,
445 0xF0, 0x22, 0x85, 0x38, 0x38, 0x85, 0x39, 0x39,
446/*0C80*/0x85, 0x3A, 0x3A, 0x12, 0x08, 0x14, 0xE5, 0x38,
447 0x12, 0x07, 0xC0, 0xE5, 0x39, 0xF0, 0x22, 0x7F,
448/*0C90*/0x06, 0x12, 0x17, 0x31, 0x12, 0x1D, 0x23, 0x12,
449 0x0E, 0x04, 0x12, 0x0E, 0x33, 0xE0, 0x44, 0x0A,
450/*0CA0*/0xF0, 0x74, 0x8E, 0xFE, 0x12, 0x0E, 0x04, 0x12,
451 0x0E, 0x0B, 0xEF, 0xF0, 0xE5, 0x28, 0x30, 0xE5,
452/*0CB0*/0x03, 0xD3, 0x80, 0x01, 0xC3, 0x40, 0x05, 0x75,
453 0x14, 0x20, 0x80, 0x03, 0x75, 0x14, 0x08, 0x12,
454/*0CC0*/0x0E, 0x04, 0x75, 0x83, 0x8A, 0xE5, 0x14, 0xF0,
455 0xB4, 0xFF, 0x05, 0x75, 0x12, 0x80, 0x80, 0x06,
456/*0CD0*/0xE5, 0x14, 0xC3, 0x13, 0xF5, 0x12, 0xE4, 0xF5,
457 0x16, 0xF5, 0x7F, 0x12, 0x19, 0x36, 0x12, 0x13,
458/*0CE0*/0xA3, 0xE5, 0x0A, 0xC3, 0x94, 0x01, 0x50, 0x09,
459 0x05, 0x16, 0xE5, 0x16, 0xC3, 0x94, 0x14, 0x40,
460/*0CF0*/0xEA, 0xE5, 0xE4, 0x20, 0xE7, 0x28, 0x12, 0x0E,
461 0x04, 0x75, 0x83, 0xD2, 0xE0, 0x54, 0x08, 0xD3,
462/*0D00*/0x94, 0x00, 0x40, 0x04, 0x7F, 0x01, 0x80, 0x02,
463 0x7F, 0x00, 0xE5, 0x0A, 0xC3, 0x94, 0x01, 0x40,
464/*0D10*/0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEF,
465 0x5E, 0x60, 0x03, 0x12, 0x1D, 0xD7, 0xE5, 0x7F,
466/*0D20*/0xC3, 0x94, 0x11, 0x40, 0x14, 0x12, 0x0E, 0x04,
467 0x75, 0x83, 0xD2, 0xE0, 0x44, 0x80, 0xF0, 0xE5,
468/*0D30*/0xE4, 0x20, 0xE7, 0x0F, 0x12, 0x1D, 0xD7, 0x80,
469 0x0A, 0x12, 0x0E, 0x04, 0x75, 0x83, 0xD2, 0xE0,
470/*0D40*/0x54, 0x7F, 0xF0, 0x12, 0x1D, 0x23, 0x22, 0x74,
471 0x8A, 0x85, 0x08, 0x82, 0xF5, 0x83, 0xE5, 0x17,
472/*0D50*/0xF0, 0x12, 0x0E, 0x3A, 0xE4, 0xF0, 0x90, 0x07,
473 0x02, 0xE0, 0x12, 0x0E, 0x17, 0x75, 0x83, 0x90,
474/*0D60*/0xEF, 0xF0, 0x74, 0x92, 0xFE, 0xE5, 0x08, 0x44,
475 0x07, 0xFF, 0xF5, 0x82, 0x8E, 0x83, 0xE0, 0x54,
476/*0D70*/0xC0, 0xFD, 0x90, 0x07, 0x03, 0xE0, 0x54, 0x3F,
477 0x4D, 0x8F, 0x82, 0x8E, 0x83, 0xF0, 0x90, 0x07,
478/*0D80*/0x04, 0xE0, 0x12, 0x0E, 0x17, 0x75, 0x83, 0x82,
479 0xEF, 0xF0, 0x90, 0x07, 0x05, 0xE0, 0xFF, 0xED,
480/*0D90*/0x44, 0x07, 0xF5, 0x82, 0x75, 0x83, 0xB4, 0xEF,
481 0x12, 0x0E, 0x03, 0x75, 0x83, 0x80, 0xE0, 0x54,
482/*0DA0*/0xBF, 0xF0, 0x30, 0x37, 0x0A, 0x12, 0x0E, 0x91,
483 0x75, 0x83, 0x94, 0xE0, 0x44, 0x80, 0xF0, 0x30,
484/*0DB0*/0x38, 0x0A, 0x12, 0x0E, 0x91, 0x75, 0x83, 0x92,
485 0xE0, 0x44, 0x80, 0xF0, 0xE5, 0x28, 0x30, 0xE4,
486/*0DC0*/0x1A, 0x20, 0x39, 0x0A, 0x12, 0x0E, 0x04, 0x75,
487 0x83, 0x88, 0xE0, 0x54, 0x7F, 0xF0, 0x20, 0x3A,
488/*0DD0*/0x0A, 0x12, 0x0E, 0x04, 0x75, 0x83, 0x88, 0xE0,
489 0x54, 0xBF, 0xF0, 0x74, 0x8C, 0xFE, 0x12, 0x0E,
490/*0DE0*/0x04, 0x8E, 0x83, 0xE0, 0x54, 0x0F, 0x12, 0x0E,
491 0x03, 0x75, 0x83, 0x86, 0xE0, 0x54, 0xBF, 0xF0,
492/*0DF0*/0xE5, 0x08, 0x44, 0x06, 0x12, 0x0D, 0xFD, 0x75,
493 0x83, 0x8A, 0xE4, 0xF0, 0x22, 0xF5, 0x82, 0x75,
494/*0E00*/0x83, 0x82, 0xE4, 0xF0, 0xE5, 0x08, 0x44, 0x07,
495 0xF5, 0x82, 0x22, 0x8E, 0x83, 0xE0, 0xF5, 0x10,
496/*0E10*/0x54, 0xFE, 0xF0, 0xE5, 0x10, 0x44, 0x01, 0xFF,
497 0xE5, 0x08, 0xFD, 0xED, 0x44, 0x07, 0xF5, 0x82,
498/*0E20*/0x22, 0xE5, 0x15, 0xC4, 0x54, 0x07, 0xFF, 0xE5,
499 0x08, 0xFD, 0xED, 0x44, 0x08, 0xF5, 0x82, 0x75,
500/*0E30*/0x83, 0x82, 0x22, 0x75, 0x83, 0x80, 0xE0, 0x44,
501 0x40, 0xF0, 0xE5, 0x08, 0x44, 0x08, 0xF5, 0x82,
502/*0E40*/0x75, 0x83, 0x8A, 0x22, 0xE5, 0x16, 0x25, 0xE0,
503 0x25, 0xE0, 0x24, 0xAF, 0xF5, 0x82, 0xE4, 0x34,
504/*0E50*/0x1A, 0xF5, 0x83, 0xE4, 0x93, 0xF5, 0x0D, 0x22,
505 0x43, 0xE1, 0x10, 0x43, 0xE1, 0x80, 0x53, 0xE1,
506/*0E60*/0xFD, 0x85, 0xE1, 0x10, 0x22, 0xE5, 0x16, 0x25,
507 0xE0, 0x25, 0xE0, 0x24, 0xB2, 0xF5, 0x82, 0xE4,
508/*0E70*/0x34, 0x1A, 0xF5, 0x83, 0xE4, 0x93, 0x22, 0x85,
509 0x55, 0x82, 0x85, 0x54, 0x83, 0xE5, 0x15, 0xF0,
510/*0E80*/0x22, 0xE5, 0xE2, 0x54, 0x20, 0xD3, 0x94, 0x00,
511 0x22, 0xE5, 0xE2, 0x54, 0x40, 0xD3, 0x94, 0x00,
512/*0E90*/0x22, 0xE5, 0x08, 0x44, 0x06, 0xF5, 0x82, 0x22,
513 0xFD, 0xE5, 0x08, 0xFB, 0xEB, 0x44, 0x07, 0xF5,
514/*0EA0*/0x82, 0x22, 0x53, 0xF9, 0xF7, 0x75, 0xFE, 0x30,
515 0x22, 0xEF, 0x4E, 0x70, 0x26, 0x12, 0x07, 0xCC,
516/*0EB0*/0xE0, 0xFD, 0x90, 0x07, 0x26, 0x12, 0x07, 0x7B,
517 0x12, 0x07, 0xD8, 0xE0, 0xFD, 0x90, 0x07, 0x28,
518/*0EC0*/0x12, 0x07, 0x7B, 0x12, 0x08, 0x81, 0x12, 0x07,
519 0x72, 0x12, 0x08, 0x35, 0xE0, 0x90, 0x07, 0x24,
520/*0ED0*/0x12, 0x07, 0x78, 0xEF, 0x64, 0x04, 0x4E, 0x70,
521 0x29, 0x12, 0x07, 0xE4, 0xE0, 0xFD, 0x90, 0x07,
522/*0EE0*/0x26, 0x12, 0x07, 0x7B, 0x12, 0x07, 0xF0, 0xE0,
523 0xFD, 0x90, 0x07, 0x28, 0x12, 0x07, 0x7B, 0x12,
524/*0EF0*/0x08, 0x8B, 0x12, 0x07, 0x72, 0x12, 0x08, 0x41,
525 0xE0, 0x54, 0x1F, 0xFD, 0x90, 0x07, 0x24, 0x12,
526/*0F00*/0x07, 0x7B, 0xEF, 0x64, 0x01, 0x4E, 0x70, 0x04,
527 0x7D, 0x01, 0x80, 0x02, 0x7D, 0x00, 0xEF, 0x64,
528/*0F10*/0x02, 0x4E, 0x70, 0x04, 0x7F, 0x01, 0x80, 0x02,
529 0x7F, 0x00, 0xEF, 0x4D, 0x60, 0x35, 0x12, 0x07,
530/*0F20*/0xFC, 0xE0, 0xFF, 0x90, 0x07, 0x26, 0x12, 0x07,
531 0x89, 0xEF, 0xF0, 0x12, 0x08, 0x08, 0xE0, 0xFF,
532/*0F30*/0x90, 0x07, 0x28, 0x12, 0x07, 0x89, 0xEF, 0xF0,
533 0x12, 0x08, 0x4D, 0xE0, 0x54, 0x1F, 0xFF, 0x12,
534/*0F40*/0x07, 0x86, 0xEF, 0xF0, 0x12, 0x08, 0x59, 0xE0,
535 0x54, 0x1F, 0xFF, 0x90, 0x07, 0x24, 0x12, 0x07,
536/*0F50*/0x89, 0xEF, 0xF0, 0x22, 0xE4, 0xF5, 0x53, 0x12,
537 0x0E, 0x81, 0x40, 0x04, 0x7F, 0x01, 0x80, 0x02,
538/*0F60*/0x7F, 0x00, 0x12, 0x0E, 0x89, 0x40, 0x04, 0x7E,
539 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x4F, 0x70,
540/*0F70*/0x03, 0x02, 0x0F, 0xF6, 0x85, 0xE1, 0x10, 0x43,
541 0xE1, 0x02, 0x53, 0xE1, 0x0F, 0x85, 0xE1, 0x10,
542/*0F80*/0xE4, 0xF5, 0x51, 0xE5, 0xE3, 0x54, 0x3F, 0xF5,
543 0x52, 0x12, 0x0E, 0x89, 0x40, 0x1D, 0xAD, 0x52,
544/*0F90*/0xAF, 0x51, 0x12, 0x11, 0x18, 0xEF, 0x60, 0x08,
545 0x85, 0xE1, 0x10, 0x43, 0xE1, 0x40, 0x80, 0x0B,
546/*0FA0*/0x53, 0xE1, 0xBF, 0x12, 0x0E, 0x58, 0x12, 0x00,
547 0x06, 0x80, 0xFB, 0xE5, 0xE3, 0x54, 0x3F, 0xF5,
548/*0FB0*/0x51, 0xE5, 0xE4, 0x54, 0x3F, 0xF5, 0x52, 0x12,
549 0x0E, 0x81, 0x40, 0x1D, 0xAD, 0x52, 0xAF, 0x51,
550/*0FC0*/0x12, 0x11, 0x18, 0xEF, 0x60, 0x08, 0x85, 0xE1,
551 0x10, 0x43, 0xE1, 0x20, 0x80, 0x0B, 0x53, 0xE1,
552/*0FD0*/0xDF, 0x12, 0x0E, 0x58, 0x12, 0x00, 0x06, 0x80,
553 0xFB, 0x12, 0x0E, 0x81, 0x40, 0x04, 0x7F, 0x01,
554/*0FE0*/0x80, 0x02, 0x7F, 0x00, 0x12, 0x0E, 0x89, 0x40,
555 0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE,
556/*0FF0*/0x4F, 0x60, 0x03, 0x12, 0x0E, 0x5B, 0x22, 0x12,
557 0x0E, 0x21, 0xEF, 0xF0, 0x12, 0x10, 0x91, 0x22,
558/*1000*/0x02, 0x11, 0x00, 0x02, 0x10, 0x40, 0x02, 0x10,
559 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
560/*1010*/0x01, 0x20, 0x01, 0x20, 0xE4, 0xF5, 0x57, 0x12,
561 0x16, 0xBD, 0x12, 0x16, 0x44, 0xE4, 0x12, 0x10,
562/*1020*/0x56, 0x12, 0x14, 0xB7, 0x90, 0x07, 0x26, 0x12,
563 0x07, 0x35, 0xE4, 0x12, 0x07, 0x31, 0xE4, 0xF0,
564/*1030*/0x12, 0x10, 0x56, 0x12, 0x14, 0xB7, 0x90, 0x07,
565 0x26, 0x12, 0x07, 0x35, 0xE5, 0x41, 0x12, 0x07,
566/*1040*/0x31, 0xE5, 0x40, 0xF0, 0xAF, 0x57, 0x7E, 0x00,
567 0xAD, 0x56, 0x7C, 0x00, 0x12, 0x04, 0x44, 0xAF,
568/*1050*/0x56, 0x7E, 0x00, 0x02, 0x11, 0xEE, 0xFF, 0x90,
569 0x07, 0x20, 0xA3, 0xE0, 0xFD, 0xE4, 0xF5, 0x56,
570/*1060*/0xF5, 0x40, 0xFE, 0xFC, 0xAB, 0x56, 0xFA, 0x12,
571 0x11, 0x51, 0x7F, 0x0F, 0x7D, 0x18, 0xE4, 0xF5,
572/*1070*/0x56, 0xF5, 0x40, 0xFE, 0xFC, 0xAB, 0x56, 0xFA,
573 0x12, 0x15, 0x41, 0xAF, 0x56, 0x7E, 0x00, 0x12,
574/*1080*/0x1A, 0xFF, 0xE4, 0xFF, 0xF5, 0x56, 0x7D, 0x1F,
575 0xF5, 0x40, 0xFE, 0xFC, 0xAB, 0x56, 0xFA, 0x22,
576/*1090*/0x22, 0xE4, 0xF5, 0x55, 0xE5, 0x08, 0xFD, 0x74,
577 0xA0, 0xF5, 0x56, 0xED, 0x44, 0x07, 0xF5, 0x57,
578/*10A0*/0xE5, 0x28, 0x30, 0xE5, 0x03, 0xD3, 0x80, 0x01,
579 0xC3, 0x40, 0x05, 0x7F, 0x28, 0xEF, 0x80, 0x04,
580/*10B0*/0x7F, 0x14, 0xEF, 0xC3, 0x13, 0xF5, 0x54, 0xE4,
581 0xF9, 0x12, 0x0E, 0x18, 0x75, 0x83, 0x8E, 0xE0,
582/*10C0*/0xF5, 0x10, 0xCE, 0xEF, 0xCE, 0xEE, 0xD3, 0x94,
583 0x00, 0x40, 0x26, 0xE5, 0x10, 0x54, 0xFE, 0x12,
584/*10D0*/0x0E, 0x98, 0x75, 0x83, 0x8E, 0xED, 0xF0, 0xE5,
585 0x10, 0x44, 0x01, 0xFD, 0xEB, 0x44, 0x07, 0xF5,
586/*10E0*/0x82, 0xED, 0xF0, 0x85, 0x57, 0x82, 0x85, 0x56,
587 0x83, 0xE0, 0x30, 0xE3, 0x01, 0x09, 0x1E, 0x80,
588/*10F0*/0xD4, 0xC2, 0x34, 0xE9, 0xC3, 0x95, 0x54, 0x40,
589 0x02, 0xD2, 0x34, 0x22, 0x02, 0x00, 0x06, 0x22,
590/*1100*/0x30, 0x30, 0x11, 0x90, 0x10, 0x00, 0xE4, 0x93,
591 0xF5, 0x10, 0x90, 0x10, 0x10, 0xE4, 0x93, 0xF5,
592/*1110*/0x10, 0x12, 0x10, 0x90, 0x12, 0x11, 0x50, 0x22,
593 0xE4, 0xFC, 0xC3, 0xED, 0x9F, 0xFA, 0xEF, 0xF5,
594/*1120*/0x83, 0x75, 0x82, 0x00, 0x79, 0xFF, 0xE4, 0x93,
595 0xCC, 0x6C, 0xCC, 0xA3, 0xD9, 0xF8, 0xDA, 0xF6,
596/*1130*/0xE5, 0xE2, 0x30, 0xE4, 0x02, 0x8C, 0xE5, 0xED,
597 0x24, 0xFF, 0xFF, 0xEF, 0x75, 0x82, 0xFF, 0xF5,
598/*1140*/0x83, 0xE4, 0x93, 0x6C, 0x70, 0x03, 0x7F, 0x01,
599 0x22, 0x7F, 0x00, 0x22, 0x22, 0x11, 0x00, 0x00,
600/*1150*/0x22, 0x8E, 0x58, 0x8F, 0x59, 0x8C, 0x5A, 0x8D,
601 0x5B, 0x8A, 0x5C, 0x8B, 0x5D, 0x75, 0x5E, 0x01,
602/*1160*/0xE4, 0xF5, 0x5F, 0xF5, 0x60, 0xF5, 0x62, 0x12,
603 0x07, 0x2A, 0x75, 0x83, 0xD0, 0xE0, 0xFF, 0xC4,
604/*1170*/0x54, 0x0F, 0xF5, 0x61, 0x12, 0x1E, 0xA5, 0x85,
605 0x59, 0x5E, 0xD3, 0xE5, 0x5E, 0x95, 0x5B, 0xE5,
606/*1180*/0x5A, 0x12, 0x07, 0x6B, 0x50, 0x4B, 0x12, 0x07,
607 0x03, 0x75, 0x83, 0xBC, 0xE0, 0x45, 0x5E, 0x12,
608/*1190*/0x07, 0x29, 0x75, 0x83, 0xBE, 0xE0, 0x45, 0x5E,
609 0x12, 0x07, 0x29, 0x75, 0x83, 0xC0, 0xE0, 0x45,
610/*11A0*/0x5E, 0xF0, 0xAF, 0x5F, 0xE5, 0x60, 0x12, 0x08,
611 0x78, 0x12, 0x0A, 0xFF, 0xAF, 0x62, 0x7E, 0x00,
612/*11B0*/0xAD, 0x5D, 0xAC, 0x5C, 0x12, 0x04, 0x44, 0xE5,
613 0x61, 0xAF, 0x5E, 0x7E, 0x00, 0xB4, 0x03, 0x05,
614/*11C0*/0x12, 0x1E, 0x21, 0x80, 0x07, 0xAD, 0x5D, 0xAC,
615 0x5C, 0x12, 0x13, 0x17, 0x05, 0x5E, 0x02, 0x11,
616/*11D0*/0x7A, 0x12, 0x07, 0x03, 0x75, 0x83, 0xBC, 0xE0,
617 0x45, 0x40, 0x12, 0x07, 0x29, 0x75, 0x83, 0xBE,
618/*11E0*/0xE0, 0x45, 0x40, 0x12, 0x07, 0x29, 0x75, 0x83,
619 0xC0, 0xE0, 0x45, 0x40, 0xF0, 0x22, 0x8E, 0x58,
620/*11F0*/0x8F, 0x59, 0x75, 0x5A, 0x01, 0x79, 0x01, 0x75,
621 0x5B, 0x01, 0xE4, 0xFB, 0x12, 0x07, 0x2A, 0x75,
622/*1200*/0x83, 0xAE, 0xE0, 0x54, 0x1A, 0xFF, 0x12, 0x08,
623 0x65, 0xE0, 0xC4, 0x13, 0x54, 0x07, 0xFE, 0xEF,
624/*1210*/0x70, 0x0C, 0xEE, 0x65, 0x35, 0x70, 0x07, 0x90,
625 0x07, 0x2F, 0xE0, 0xB4, 0x01, 0x0D, 0xAF, 0x35,
626/*1220*/0x7E, 0x00, 0x12, 0x0E, 0xA9, 0xCF, 0xEB, 0xCF,
627 0x02, 0x1E, 0x60, 0xE5, 0x59, 0x64, 0x02, 0x45,
628/*1230*/0x58, 0x70, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F,
629 0x00, 0xE5, 0x59, 0x45, 0x58, 0x70, 0x04, 0x7E,
630/*1240*/0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x4F, 0x60,
631 0x23, 0x85, 0x41, 0x49, 0x85, 0x40, 0x4B, 0xE5,
632/*1250*/0x59, 0x45, 0x58, 0x70, 0x2C, 0xAF, 0x5A, 0xFE,
633 0xCD, 0xE9, 0xCD, 0xFC, 0xAB, 0x59, 0xAA, 0x58,
634/*1260*/0x12, 0x0A, 0xFF, 0xAF, 0x5B, 0x7E, 0x00, 0x12,
635 0x1E, 0x60, 0x80, 0x15, 0xAF, 0x5B, 0x7E, 0x00,
636/*1270*/0x12, 0x1E, 0x60, 0x90, 0x07, 0x26, 0x12, 0x07,
637 0x35, 0xE5, 0x49, 0x12, 0x07, 0x31, 0xE5, 0x4B,
638/*1280*/0xF0, 0xE4, 0xFD, 0xAF, 0x35, 0xFE, 0xFC, 0x12,
639 0x09, 0x15, 0x22, 0x8C, 0x64, 0x8D, 0x65, 0x12,
640/*1290*/0x08, 0xDA, 0x40, 0x3C, 0xE5, 0x65, 0x45, 0x64,
641 0x70, 0x10, 0x12, 0x09, 0x04, 0xC3, 0xE5, 0x3E,
642/*12A0*/0x12, 0x07, 0x69, 0x40, 0x3B, 0x12, 0x08, 0x95,
643 0x80, 0x18, 0xE5, 0x3E, 0xC3, 0x95, 0x38, 0x40,
644/*12B0*/0x1D, 0x85, 0x3E, 0x38, 0xE5, 0x3E, 0x60, 0x05,
645 0x85, 0x3F, 0x39, 0x80, 0x03, 0x85, 0x39, 0x39,
646/*12C0*/0x8F, 0x3A, 0x12, 0x07, 0xA8, 0xE5, 0x3E, 0x12,
647 0x07, 0x53, 0xE5, 0x3F, 0xF0, 0x22, 0x80, 0x3B,
648/*12D0*/0xE5, 0x65, 0x45, 0x64, 0x70, 0x11, 0x12, 0x07,
649 0x5F, 0x40, 0x05, 0x12, 0x08, 0x9E, 0x80, 0x1F,
650/*12E0*/0x12, 0x07, 0x3E, 0xE5, 0x41, 0xF0, 0x22, 0xE5,
651 0x3C, 0xC3, 0x95, 0x38, 0x40, 0x1D, 0x85, 0x3C,
652/*12F0*/0x38, 0xE5, 0x3C, 0x60, 0x05, 0x85, 0x3D, 0x39,
653 0x80, 0x03, 0x85, 0x39, 0x39, 0x8F, 0x3A, 0x12,
654/*1300*/0x07, 0xA8, 0xE5, 0x3C, 0x12, 0x07, 0x53, 0xE5,
655 0x3D, 0xF0, 0x22, 0x12, 0x07, 0x9F, 0xE5, 0x38,
656/*1310*/0x12, 0x07, 0x53, 0xE5, 0x39, 0xF0, 0x22, 0x8C,
657 0x63, 0x8D, 0x64, 0x12, 0x08, 0xDA, 0x40, 0x3C,
658/*1320*/0xE5, 0x64, 0x45, 0x63, 0x70, 0x10, 0x12, 0x09,
659 0x04, 0xC3, 0xE5, 0x3E, 0x12, 0x07, 0x69, 0x40,
660/*1330*/0x3B, 0x12, 0x08, 0x95, 0x80, 0x18, 0xE5, 0x3E,
661 0xC3, 0x95, 0x38, 0x40, 0x1D, 0x85, 0x3E, 0x38,
662/*1340*/0xE5, 0x3E, 0x60, 0x05, 0x85, 0x3F, 0x39, 0x80,
663 0x03, 0x85, 0x39, 0x39, 0x8F, 0x3A, 0x12, 0x07,
664/*1350*/0xA8, 0xE5, 0x3E, 0x12, 0x07, 0x53, 0xE5, 0x3F,
665 0xF0, 0x22, 0x80, 0x3B, 0xE5, 0x64, 0x45, 0x63,
666/*1360*/0x70, 0x11, 0x12, 0x07, 0x5F, 0x40, 0x05, 0x12,
667 0x08, 0x9E, 0x80, 0x1F, 0x12, 0x07, 0x3E, 0xE5,
668/*1370*/0x41, 0xF0, 0x22, 0xE5, 0x3C, 0xC3, 0x95, 0x38,
669 0x40, 0x1D, 0x85, 0x3C, 0x38, 0xE5, 0x3C, 0x60,
670/*1380*/0x05, 0x85, 0x3D, 0x39, 0x80, 0x03, 0x85, 0x39,
671 0x39, 0x8F, 0x3A, 0x12, 0x07, 0xA8, 0xE5, 0x3C,
672/*1390*/0x12, 0x07, 0x53, 0xE5, 0x3D, 0xF0, 0x22, 0x12,
673 0x07, 0x9F, 0xE5, 0x38, 0x12, 0x07, 0x53, 0xE5,
674/*13A0*/0x39, 0xF0, 0x22, 0xE5, 0x0D, 0xFE, 0xE5, 0x08,
675 0x8E, 0x54, 0x44, 0x05, 0xF5, 0x55, 0x75, 0x15,
676/*13B0*/0x0F, 0xF5, 0x82, 0x12, 0x0E, 0x7A, 0x12, 0x17,
677 0xA3, 0x20, 0x31, 0x05, 0x75, 0x15, 0x03, 0x80,
678/*13C0*/0x03, 0x75, 0x15, 0x0B, 0xE5, 0x0A, 0xC3, 0x94,
679 0x01, 0x50, 0x38, 0x12, 0x14, 0x20, 0x20, 0x31,
680/*13D0*/0x06, 0x05, 0x15, 0x05, 0x15, 0x80, 0x04, 0x15,
681 0x15, 0x15, 0x15, 0xE5, 0x0A, 0xC3, 0x94, 0x01,
682/*13E0*/0x50, 0x21, 0x12, 0x14, 0x20, 0x20, 0x31, 0x04,
683 0x05, 0x15, 0x80, 0x02, 0x15, 0x15, 0xE5, 0x0A,
684/*13F0*/0xC3, 0x94, 0x01, 0x50, 0x0E, 0x12, 0x0E, 0x77,
685 0x12, 0x17, 0xA3, 0x20, 0x31, 0x05, 0x05, 0x15,
686/*1400*/0x12, 0x0E, 0x77, 0xE5, 0x15, 0xB4, 0x08, 0x04,
687 0x7F, 0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5, 0x15,
688/*1410*/0xB4, 0x07, 0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E,
689 0x00, 0xEE, 0x4F, 0x60, 0x02, 0x05, 0x7F, 0x22,
690/*1420*/0x85, 0x55, 0x82, 0x85, 0x54, 0x83, 0xE5, 0x15,
691 0xF0, 0x12, 0x17, 0xA3, 0x22, 0x12, 0x07, 0x2A,
692/*1430*/0x75, 0x83, 0xAE, 0x74, 0xFF, 0x12, 0x07, 0x29,
693 0xE0, 0x54, 0x1A, 0xF5, 0x34, 0xE0, 0xC4, 0x13,
694/*1440*/0x54, 0x07, 0xF5, 0x35, 0x24, 0xFE, 0x60, 0x24,
695 0x24, 0xFE, 0x60, 0x3C, 0x24, 0x04, 0x70, 0x63,
696/*1450*/0x75, 0x31, 0x2D, 0xE5, 0x08, 0xFD, 0x74, 0xB6,
697 0x12, 0x07, 0x92, 0x74, 0xBC, 0x90, 0x07, 0x22,
698/*1460*/0x12, 0x07, 0x95, 0x74, 0x90, 0x12, 0x07, 0xB3,
699 0x74, 0x92, 0x80, 0x3C, 0x75, 0x31, 0x3A, 0xE5,
700/*1470*/0x08, 0xFD, 0x74, 0xBA, 0x12, 0x07, 0x92, 0x74,
701 0xC0, 0x90, 0x07, 0x22, 0x12, 0x07, 0xB6, 0x74,
702/*1480*/0xC4, 0x12, 0x07, 0xB3, 0x74, 0xC8, 0x80, 0x20,
703 0x75, 0x31, 0x35, 0xE5, 0x08, 0xFD, 0x74, 0xB8,
704/*1490*/0x12, 0x07, 0x92, 0x74, 0xBE, 0xFF, 0xED, 0x44,
705 0x07, 0x90, 0x07, 0x22, 0xCF, 0xF0, 0xA3, 0xEF,
706/*14A0*/0xF0, 0x74, 0xC2, 0x12, 0x07, 0xB3, 0x74, 0xC6,
707 0xFF, 0xED, 0x44, 0x07, 0xA3, 0xCF, 0xF0, 0xA3,
708/*14B0*/0xEF, 0xF0, 0x22, 0x75, 0x34, 0x01, 0x22, 0x8E,
709 0x58, 0x8F, 0x59, 0x8C, 0x5A, 0x8D, 0x5B, 0x8A,
710/*14C0*/0x5C, 0x8B, 0x5D, 0x75, 0x5E, 0x01, 0xE4, 0xF5,
711 0x5F, 0x12, 0x1E, 0xA5, 0x85, 0x59, 0x5E, 0xD3,
712/*14D0*/0xE5, 0x5E, 0x95, 0x5B, 0xE5, 0x5A, 0x12, 0x07,
713 0x6B, 0x50, 0x57, 0xE5, 0x5D, 0x45, 0x5C, 0x70,
714/*14E0*/0x30, 0x12, 0x07, 0x2A, 0x75, 0x83, 0x92, 0xE5,
715 0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC6, 0xE5,
716/*14F0*/0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC8, 0xE5,
717 0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0x90, 0xE5,
718/*1500*/0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC2, 0xE5,
719 0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC4, 0x80,
720/*1510*/0x03, 0x12, 0x07, 0x32, 0xE5, 0x5E, 0xF0, 0xAF,
721 0x5F, 0x7E, 0x00, 0xAD, 0x5D, 0xAC, 0x5C, 0x12,
722/*1520*/0x04, 0x44, 0xAF, 0x5E, 0x7E, 0x00, 0xAD, 0x5D,
723 0xAC, 0x5C, 0x12, 0x0B, 0xD1, 0x05, 0x5E, 0x02,
724/*1530*/0x14, 0xCF, 0xAB, 0x5D, 0xAA, 0x5C, 0xAD, 0x5B,
725 0xAC, 0x5A, 0xAF, 0x59, 0xAE, 0x58, 0x02, 0x1B,
726/*1540*/0xFB, 0x8C, 0x5C, 0x8D, 0x5D, 0x8A, 0x5E, 0x8B,
727 0x5F, 0x75, 0x60, 0x01, 0xE4, 0xF5, 0x61, 0xF5,
728/*1550*/0x62, 0xF5, 0x63, 0x12, 0x1E, 0xA5, 0x8F, 0x60,
729 0xD3, 0xE5, 0x60, 0x95, 0x5D, 0xE5, 0x5C, 0x12,
730/*1560*/0x07, 0x6B, 0x50, 0x61, 0xE5, 0x5F, 0x45, 0x5E,
731 0x70, 0x27, 0x12, 0x07, 0x2A, 0x75, 0x83, 0xB6,
732/*1570*/0xE5, 0x60, 0x12, 0x07, 0x29, 0x75, 0x83, 0xB8,
733 0xE5, 0x60, 0x12, 0x07, 0x29, 0x75, 0x83, 0xBA,
734/*1580*/0xE5, 0x60, 0xF0, 0xAF, 0x61, 0x7E, 0x00, 0xE5,
735 0x62, 0x12, 0x08, 0x7A, 0x12, 0x0A, 0xFF, 0x80,
736/*1590*/0x19, 0x90, 0x07, 0x24, 0x12, 0x07, 0x35, 0xE5,
737 0x60, 0x12, 0x07, 0x29, 0x75, 0x83, 0x8E, 0xE4,
738/*15A0*/0x12, 0x07, 0x29, 0x74, 0x01, 0x12, 0x07, 0x29,
739 0xE4, 0xF0, 0xAF, 0x63, 0x7E, 0x00, 0xAD, 0x5F,
740/*15B0*/0xAC, 0x5E, 0x12, 0x04, 0x44, 0xAF, 0x60, 0x7E,
741 0x00, 0xAD, 0x5F, 0xAC, 0x5E, 0x12, 0x12, 0x8B,
742/*15C0*/0x05, 0x60, 0x02, 0x15, 0x58, 0x22, 0x90, 0x11,
743 0x4D, 0xE4, 0x93, 0x90, 0x07, 0x2E, 0xF0, 0x12,
744/*15D0*/0x08, 0x1F, 0x75, 0x83, 0xAE, 0xE0, 0x54, 0x1A,
745 0xF5, 0x34, 0x70, 0x67, 0xEF, 0x44, 0x07, 0xF5,
746/*15E0*/0x82, 0x75, 0x83, 0xCE, 0xE0, 0xFF, 0x13, 0x13,
747 0x13, 0x54, 0x07, 0xF5, 0x36, 0x54, 0x0F, 0xD3,
748/*15F0*/0x94, 0x00, 0x40, 0x06, 0x12, 0x14, 0x2D, 0x12,
749 0x1B, 0xA9, 0xE5, 0x36, 0x54, 0x0F, 0x24, 0xFE,
750/*1600*/0x60, 0x0C, 0x14, 0x60, 0x0C, 0x14, 0x60, 0x19,
751 0x24, 0x03, 0x70, 0x37, 0x80, 0x10, 0x02, 0x1E,
752/*1610*/0x91, 0x12, 0x1E, 0x91, 0x12, 0x07, 0x2A, 0x75,
753 0x83, 0xCE, 0xE0, 0x54, 0xEF, 0xF0, 0x02, 0x1D,
754/*1620*/0xAE, 0x12, 0x10, 0x14, 0xE4, 0xF5, 0x55, 0x12,
755 0x1D, 0x85, 0x05, 0x55, 0xE5, 0x55, 0xC3, 0x94,
756/*1630*/0x05, 0x40, 0xF4, 0x12, 0x07, 0x2A, 0x75, 0x83,
757 0xCE, 0xE0, 0x54, 0xC7, 0x12, 0x07, 0x29, 0xE0,
758/*1640*/0x44, 0x08, 0xF0, 0x22, 0xE4, 0xF5, 0x58, 0xF5,
759 0x59, 0xAF, 0x08, 0xEF, 0x44, 0x07, 0xF5, 0x82,
760/*1650*/0x75, 0x83, 0xD0, 0xE0, 0xFD, 0xC4, 0x54, 0x0F,
761 0xF5, 0x5A, 0xEF, 0x44, 0x07, 0xF5, 0x82, 0x75,
762/*1660*/0x83, 0x80, 0x74, 0x01, 0xF0, 0x12, 0x08, 0x21,
763 0x75, 0x83, 0x82, 0xE5, 0x45, 0xF0, 0xEF, 0x44,
764/*1670*/0x07, 0xF5, 0x82, 0x75, 0x83, 0x8A, 0x74, 0xFF,
765 0xF0, 0x12, 0x1A, 0x4D, 0x12, 0x07, 0x2A, 0x75,
766/*1680*/0x83, 0xBC, 0xE0, 0x54, 0xEF, 0x12, 0x07, 0x29,
767 0x75, 0x83, 0xBE, 0xE0, 0x54, 0xEF, 0x12, 0x07,
768/*1690*/0x29, 0x75, 0x83, 0xC0, 0xE0, 0x54, 0xEF, 0x12,
769 0x07, 0x29, 0x75, 0x83, 0xBC, 0xE0, 0x44, 0x10,
770/*16A0*/0x12, 0x07, 0x29, 0x75, 0x83, 0xBE, 0xE0, 0x44,
771 0x10, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC0, 0xE0,
772/*16B0*/0x44, 0x10, 0xF0, 0xAF, 0x58, 0xE5, 0x59, 0x12,
773 0x08, 0x78, 0x02, 0x0A, 0xFF, 0xE4, 0xF5, 0x58,
774/*16C0*/0x7D, 0x01, 0xF5, 0x59, 0xAF, 0x35, 0xFE, 0xFC,
775 0x12, 0x09, 0x15, 0x12, 0x07, 0x2A, 0x75, 0x83,
776/*16D0*/0xB6, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
777 0xB8, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
778/*16E0*/0xBA, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
779 0xBC, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
780/*16F0*/0xBE, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
781 0xC0, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
782/*1700*/0x90, 0xE4, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC2,
783 0xE4, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC4, 0xE4,
784/*1710*/0x12, 0x07, 0x29, 0x75, 0x83, 0x92, 0xE4, 0x12,
785 0x07, 0x29, 0x75, 0x83, 0xC6, 0xE4, 0x12, 0x07,
786/*1720*/0x29, 0x75, 0x83, 0xC8, 0xE4, 0xF0, 0xAF, 0x58,
787 0xFE, 0xE5, 0x59, 0x12, 0x08, 0x7A, 0x02, 0x0A,
788/*1730*/0xFF, 0xE5, 0xE2, 0x30, 0xE4, 0x6C, 0xE5, 0xE7,
789 0x54, 0xC0, 0x64, 0x40, 0x70, 0x64, 0xE5, 0x09,
790/*1740*/0xC4, 0x54, 0x30, 0xFE, 0xE5, 0x08, 0x25, 0xE0,
791 0x25, 0xE0, 0x54, 0xC0, 0x4E, 0xFE, 0xEF, 0x54,
792/*1750*/0x3F, 0x4E, 0xFD, 0xE5, 0x2B, 0xAE, 0x2A, 0x78,
793 0x02, 0xC3, 0x33, 0xCE, 0x33, 0xCE, 0xD8, 0xF9,
794/*1760*/0xF5, 0x82, 0x8E, 0x83, 0xED, 0xF0, 0xE5, 0x2B,
795 0xAE, 0x2A, 0x78, 0x02, 0xC3, 0x33, 0xCE, 0x33,
796/*1770*/0xCE, 0xD8, 0xF9, 0xFF, 0xF5, 0x82, 0x8E, 0x83,
797 0xA3, 0xE5, 0xFE, 0xF0, 0x8F, 0x82, 0x8E, 0x83,
798/*1780*/0xA3, 0xA3, 0xE5, 0xFD, 0xF0, 0x8F, 0x82, 0x8E,
799 0x83, 0xA3, 0xA3, 0xA3, 0xE5, 0xFC, 0xF0, 0xC3,
800/*1790*/0xE5, 0x2B, 0x94, 0xFA, 0xE5, 0x2A, 0x94, 0x00,
801 0x50, 0x08, 0x05, 0x2B, 0xE5, 0x2B, 0x70, 0x02,
802/*17A0*/0x05, 0x2A, 0x22, 0xE4, 0xFF, 0xE4, 0xF5, 0x58,
803 0xF5, 0x56, 0xF5, 0x57, 0x74, 0x82, 0xFC, 0x12,
804/*17B0*/0x0E, 0x04, 0x8C, 0x83, 0xE0, 0xF5, 0x10, 0x54,
805 0x7F, 0xF0, 0xE5, 0x10, 0x44, 0x80, 0x12, 0x0E,
806/*17C0*/0x98, 0xED, 0xF0, 0x7E, 0x0A, 0x12, 0x0E, 0x04,
807 0x75, 0x83, 0xA0, 0xE0, 0x20, 0xE0, 0x26, 0xDE,
808/*17D0*/0xF4, 0x05, 0x57, 0xE5, 0x57, 0x70, 0x02, 0x05,
809 0x56, 0xE5, 0x14, 0x24, 0x01, 0xFD, 0xE4, 0x33,
810/*17E0*/0xFC, 0xD3, 0xE5, 0x57, 0x9D, 0xE5, 0x56, 0x9C,
811 0x40, 0xD9, 0xE5, 0x0A, 0x94, 0x20, 0x50, 0x02,
812/*17F0*/0x05, 0x0A, 0x43, 0xE1, 0x08, 0xC2, 0x31, 0x12,
813 0x0E, 0x04, 0x75, 0x83, 0xA6, 0xE0, 0x55, 0x12,
814/*1800*/0x65, 0x12, 0x70, 0x03, 0xD2, 0x31, 0x22, 0xC2,
815 0x31, 0x22, 0x90, 0x07, 0x26, 0xE0, 0xFA, 0xA3,
816/*1810*/0xE0, 0xF5, 0x82, 0x8A, 0x83, 0xE0, 0xF5, 0x41,
817 0xE5, 0x39, 0xC3, 0x95, 0x41, 0x40, 0x26, 0xE5,
818/*1820*/0x39, 0x95, 0x41, 0xC3, 0x9F, 0xEE, 0x12, 0x07,
819 0x6B, 0x40, 0x04, 0x7C, 0x01, 0x80, 0x02, 0x7C,
820/*1830*/0x00, 0xE5, 0x41, 0x64, 0x3F, 0x60, 0x04, 0x7B,
821 0x01, 0x80, 0x02, 0x7B, 0x00, 0xEC, 0x5B, 0x60,
822/*1840*/0x29, 0x05, 0x41, 0x80, 0x28, 0xC3, 0xE5, 0x41,
823 0x95, 0x39, 0xC3, 0x9F, 0xEE, 0x12, 0x07, 0x6B,
824/*1850*/0x40, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F, 0x00,
825 0xE5, 0x41, 0x60, 0x04, 0x7E, 0x01, 0x80, 0x02,
826/*1860*/0x7E, 0x00, 0xEF, 0x5E, 0x60, 0x04, 0x15, 0x41,
827 0x80, 0x03, 0x85, 0x39, 0x41, 0x85, 0x3A, 0x40,
828/*1870*/0x22, 0xE5, 0xE2, 0x30, 0xE4, 0x60, 0xE5, 0xE1,
829 0x30, 0xE2, 0x5B, 0xE5, 0x09, 0x70, 0x04, 0x7F,
830/*1880*/0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5, 0x08, 0x70,
831 0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE,
832/*1890*/0x5F, 0x60, 0x43, 0x53, 0xF9, 0xF8, 0xE5, 0xE2,
833 0x30, 0xE4, 0x3B, 0xE5, 0xE1, 0x30, 0xE2, 0x2E,
834/*18A0*/0x43, 0xFA, 0x02, 0x53, 0xFA, 0xFB, 0xE4, 0xF5,
835 0x10, 0x90, 0x94, 0x70, 0xE5, 0x10, 0xF0, 0xE5,
836/*18B0*/0xE1, 0x30, 0xE2, 0xE7, 0x90, 0x94, 0x70, 0xE0,
837 0x65, 0x10, 0x60, 0x03, 0x43, 0xFA, 0x04, 0x05,
838/*18C0*/0x10, 0x90, 0x94, 0x70, 0xE5, 0x10, 0xF0, 0x70,
839 0xE6, 0x12, 0x00, 0x06, 0x80, 0xE1, 0x53, 0xFA,
840/*18D0*/0xFD, 0x53, 0xFA, 0xFB, 0x80, 0xC0, 0x22, 0x8F,
841 0x54, 0x12, 0x00, 0x06, 0xE5, 0xE1, 0x30, 0xE0,
842/*18E0*/0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5,
843 0x7E, 0xD3, 0x94, 0x05, 0x40, 0x04, 0x7E, 0x01,
844/*18F0*/0x80, 0x02, 0x7E, 0x00, 0xEE, 0x4F, 0x60, 0x3D,
845 0x85, 0x54, 0x11, 0xE5, 0xE2, 0x20, 0xE1, 0x32,
846/*1900*/0x74, 0xCE, 0x12, 0x1A, 0x05, 0x30, 0xE7, 0x04,
847 0x7D, 0x01, 0x80, 0x02, 0x7D, 0x00, 0x8F, 0x82,
848/*1910*/0x8E, 0x83, 0xE0, 0x30, 0xE6, 0x04, 0x7F, 0x01,
849 0x80, 0x02, 0x7F, 0x00, 0xEF, 0x5D, 0x70, 0x15,
850/*1920*/0x12, 0x15, 0xC6, 0x74, 0xCE, 0x12, 0x1A, 0x05,
851 0x30, 0xE6, 0x07, 0xE0, 0x44, 0x80, 0xF0, 0x43,
852/*1930*/0xF9, 0x80, 0x12, 0x18, 0x71, 0x22, 0x12, 0x0E,
853 0x44, 0xE5, 0x16, 0x25, 0xE0, 0x25, 0xE0, 0x24,
854/*1940*/0xB0, 0xF5, 0x82, 0xE4, 0x34, 0x1A, 0xF5, 0x83,
855 0xE4, 0x93, 0xF5, 0x0F, 0xE5, 0x16, 0x25, 0xE0,
856/*1950*/0x25, 0xE0, 0x24, 0xB1, 0xF5, 0x82, 0xE4, 0x34,
857 0x1A, 0xF5, 0x83, 0xE4, 0x93, 0xF5, 0x0E, 0x12,
858/*1960*/0x0E, 0x65, 0xF5, 0x10, 0xE5, 0x0F, 0x54, 0xF0,
859 0x12, 0x0E, 0x17, 0x75, 0x83, 0x8C, 0xEF, 0xF0,
860/*1970*/0xE5, 0x0F, 0x30, 0xE0, 0x0C, 0x12, 0x0E, 0x04,
861 0x75, 0x83, 0x86, 0xE0, 0x44, 0x40, 0xF0, 0x80,
862/*1980*/0x0A, 0x12, 0x0E, 0x04, 0x75, 0x83, 0x86, 0xE0,
863 0x54, 0xBF, 0xF0, 0x12, 0x0E, 0x91, 0x75, 0x83,
864/*1990*/0x82, 0xE5, 0x0E, 0xF0, 0x22, 0x7F, 0x05, 0x12,
865 0x17, 0x31, 0x12, 0x0E, 0x04, 0x12, 0x0E, 0x33,
866/*19A0*/0x74, 0x02, 0xF0, 0x74, 0x8E, 0xFE, 0x12, 0x0E,
867 0x04, 0x12, 0x0E, 0x0B, 0xEF, 0xF0, 0x75, 0x15,
868/*19B0*/0x70, 0x12, 0x0F, 0xF7, 0x20, 0x34, 0x05, 0x75,
869 0x15, 0x10, 0x80, 0x03, 0x75, 0x15, 0x50, 0x12,
870/*19C0*/0x0F, 0xF7, 0x20, 0x34, 0x04, 0x74, 0x10, 0x80,
871 0x02, 0x74, 0xF0, 0x25, 0x15, 0xF5, 0x15, 0x12,
872/*19D0*/0x0E, 0x21, 0xEF, 0xF0, 0x12, 0x10, 0x91, 0x20,
873 0x34, 0x17, 0xE5, 0x15, 0x64, 0x30, 0x60, 0x0C,
874/*19E0*/0x74, 0x10, 0x25, 0x15, 0xF5, 0x15, 0xB4, 0x80,
875 0x03, 0xE4, 0xF5, 0x15, 0x12, 0x0E, 0x21, 0xEF,
876/*19F0*/0xF0, 0x22, 0xF0, 0xE5, 0x0B, 0x25, 0xE0, 0x25,
877 0xE0, 0x24, 0x82, 0xF5, 0x82, 0xE4, 0x34, 0x07,
878/*1A00*/0xF5, 0x83, 0x22, 0x74, 0x88, 0xFE, 0xE5, 0x08,
879 0x44, 0x07, 0xFF, 0xF5, 0x82, 0x8E, 0x83, 0xE0,
880/*1A10*/0x22, 0xF0, 0xE5, 0x08, 0x44, 0x07, 0xF5, 0x82,
881 0x22, 0xF0, 0xE0, 0x54, 0xC0, 0x8F, 0x82, 0x8E,
882/*1A20*/0x83, 0xF0, 0x22, 0xEF, 0x44, 0x07, 0xF5, 0x82,
883 0x75, 0x83, 0x86, 0xE0, 0x54, 0x10, 0xD3, 0x94,
884/*1A30*/0x00, 0x22, 0xF0, 0x90, 0x07, 0x15, 0xE0, 0x04,
885 0xF0, 0x22, 0x44, 0x06, 0xF5, 0x82, 0x75, 0x83,
886/*1A40*/0x9E, 0xE0, 0x22, 0xFE, 0xEF, 0x44, 0x07, 0xF5,
887 0x82, 0x8E, 0x83, 0xE0, 0x22, 0xE4, 0x90, 0x07,
888/*1A50*/0x2A, 0xF0, 0xA3, 0xF0, 0x12, 0x07, 0x2A, 0x75,
889 0x83, 0x82, 0xE0, 0x54, 0x7F, 0x12, 0x07, 0x29,
890/*1A60*/0xE0, 0x44, 0x80, 0xF0, 0x12, 0x10, 0xFC, 0x12,
891 0x08, 0x1F, 0x75, 0x83, 0xA0, 0xE0, 0x20, 0xE0,
892/*1A70*/0x1A, 0x90, 0x07, 0x2B, 0xE0, 0x04, 0xF0, 0x70,
893 0x06, 0x90, 0x07, 0x2A, 0xE0, 0x04, 0xF0, 0x90,
894/*1A80*/0x07, 0x2A, 0xE0, 0xB4, 0x10, 0xE1, 0xA3, 0xE0,
895 0xB4, 0x00, 0xDC, 0xEE, 0x44, 0xA6, 0xFC, 0xEF,
896/*1A90*/0x44, 0x07, 0xF5, 0x82, 0x8C, 0x83, 0xE0, 0xF5,
897 0x32, 0xEE, 0x44, 0xA8, 0xFE, 0xEF, 0x44, 0x07,
898/*1AA0*/0xF5, 0x82, 0x8E, 0x83, 0xE0, 0xF5, 0x33, 0x22,
899 0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x90,
900/*1AB0*/0x00, 0x20, 0x0F, 0x92, 0x00, 0x21, 0x0F, 0x94,
901 0x00, 0x22, 0x0F, 0x96, 0x00, 0x23, 0x0F, 0x98,
902/*1AC0*/0x00, 0x24, 0x0F, 0x9A, 0x00, 0x25, 0x0F, 0x9C,
903 0x00, 0x26, 0x0F, 0x9E, 0x00, 0x27, 0x0F, 0xA0,
904/*1AD0*/0x01, 0x20, 0x01, 0xA2, 0x01, 0x21, 0x01, 0xA4,
905 0x01, 0x22, 0x01, 0xA6, 0x01, 0x23, 0x01, 0xA8,
906/*1AE0*/0x01, 0x24, 0x01, 0xAA, 0x01, 0x25, 0x01, 0xAC,
907 0x01, 0x26, 0x01, 0xAE, 0x01, 0x27, 0x01, 0xB0,
908/*1AF0*/0x01, 0x28, 0x01, 0xB4, 0x00, 0x28, 0x0F, 0xB6,
909 0x40, 0x28, 0x0F, 0xB8, 0x61, 0x28, 0x01, 0xCB,
910/*1B00*/0xEF, 0xCB, 0xCA, 0xEE, 0xCA, 0x7F, 0x01, 0xE4,
911 0xFD, 0xEB, 0x4A, 0x70, 0x24, 0xE5, 0x08, 0xF5,
912/*1B10*/0x82, 0x74, 0xB6, 0x12, 0x08, 0x29, 0xE5, 0x08,
913 0xF5, 0x82, 0x74, 0xB8, 0x12, 0x08, 0x29, 0xE5,
914/*1B20*/0x08, 0xF5, 0x82, 0x74, 0xBA, 0x12, 0x08, 0x29,
915 0x7E, 0x00, 0x7C, 0x00, 0x12, 0x0A, 0xFF, 0x80,
916/*1B30*/0x12, 0x90, 0x07, 0x26, 0x12, 0x07, 0x35, 0xE5,
917 0x41, 0xF0, 0x90, 0x07, 0x24, 0x12, 0x07, 0x35,
918/*1B40*/0xE5, 0x40, 0xF0, 0x12, 0x07, 0x2A, 0x75, 0x83,
919 0x8E, 0xE4, 0x12, 0x07, 0x29, 0x74, 0x01, 0x12,
920/*1B50*/0x07, 0x29, 0xE4, 0xF0, 0x22, 0xE4, 0xF5, 0x26,
921 0xF5, 0x27, 0x53, 0xE1, 0xFE, 0xF5, 0x2A, 0x75,
922/*1B60*/0x2B, 0x01, 0xF5, 0x08, 0x7F, 0x01, 0x12, 0x17,
923 0x31, 0x30, 0x30, 0x1C, 0x90, 0x1A, 0xA9, 0xE4,
924/*1B70*/0x93, 0xF5, 0x10, 0x90, 0x1F, 0xF9, 0xE4, 0x93,
925 0xF5, 0x10, 0x90, 0x00, 0x41, 0xE4, 0x93, 0xF5,
926/*1B80*/0x10, 0x90, 0x1E, 0xCA, 0xE4, 0x93, 0xF5, 0x10,
927 0x7F, 0x02, 0x12, 0x17, 0x31, 0x12, 0x0F, 0x54,
928/*1B90*/0x7F, 0x03, 0x12, 0x17, 0x31, 0x12, 0x00, 0x06,
929 0xE5, 0xE2, 0x30, 0xE7, 0x09, 0x12, 0x10, 0x00,
930/*1BA0*/0x30, 0x30, 0x03, 0x12, 0x11, 0x00, 0x02, 0x00,
931 0x47, 0x12, 0x08, 0x1F, 0x75, 0x83, 0xD0, 0xE0,
932/*1BB0*/0xC4, 0x54, 0x0F, 0xFD, 0x75, 0x43, 0x01, 0x75,
933 0x44, 0xFF, 0x12, 0x08, 0xAA, 0x74, 0x04, 0xF0,
934/*1BC0*/0x75, 0x3B, 0x01, 0xED, 0x14, 0x60, 0x0C, 0x14,
935 0x60, 0x0B, 0x14, 0x60, 0x0F, 0x24, 0x03, 0x70,
936/*1BD0*/0x0B, 0x80, 0x09, 0x80, 0x00, 0x12, 0x08, 0xA7,
937 0x04, 0xF0, 0x80, 0x06, 0x12, 0x08, 0xA7, 0x74,
938/*1BE0*/0x04, 0xF0, 0xEE, 0x44, 0x82, 0xFE, 0xEF, 0x44,
939 0x07, 0xF5, 0x82, 0x8E, 0x83, 0xE5, 0x45, 0x12,
940/*1BF0*/0x08, 0xBE, 0x75, 0x83, 0x82, 0xE5, 0x31, 0xF0,
941 0x02, 0x11, 0x4C, 0x8E, 0x60, 0x8F, 0x61, 0x12,
942/*1C00*/0x1E, 0xA5, 0xE4, 0xFF, 0xCE, 0xED, 0xCE, 0xEE,
943 0xD3, 0x95, 0x61, 0xE5, 0x60, 0x12, 0x07, 0x6B,
944/*1C10*/0x40, 0x39, 0x74, 0x20, 0x2E, 0xF5, 0x82, 0xE4,
945 0x34, 0x03, 0xF5, 0x83, 0xE0, 0x70, 0x03, 0xFF,
946/*1C20*/0x80, 0x26, 0x12, 0x08, 0xE2, 0xFD, 0xC3, 0x9F,
947 0x40, 0x1E, 0xCF, 0xED, 0xCF, 0xEB, 0x4A, 0x70,
948/*1C30*/0x0B, 0x8D, 0x42, 0x12, 0x08, 0xEE, 0xF5, 0x41,
949 0x8E, 0x40, 0x80, 0x0C, 0x12, 0x08, 0xE2, 0xF5,
950/*1C40*/0x38, 0x12, 0x08, 0xEE, 0xF5, 0x39, 0x8E, 0x3A,
951 0x1E, 0x80, 0xBC, 0x22, 0x75, 0x58, 0x01, 0xE5,
952/*1C50*/0x35, 0x70, 0x0C, 0x12, 0x07, 0xCC, 0xE0, 0xF5,
953 0x4A, 0x12, 0x07, 0xD8, 0xE0, 0xF5, 0x4C, 0xE5,
954/*1C60*/0x35, 0xB4, 0x04, 0x0C, 0x12, 0x07, 0xE4, 0xE0,
955 0xF5, 0x4A, 0x12, 0x07, 0xF0, 0xE0, 0xF5, 0x4C,
956/*1C70*/0xE5, 0x35, 0xB4, 0x01, 0x04, 0x7F, 0x01, 0x80,
957 0x02, 0x7F, 0x00, 0xE5, 0x35, 0xB4, 0x02, 0x04,
958/*1C80*/0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x4F,
959 0x60, 0x0C, 0x12, 0x07, 0xFC, 0xE0, 0xF5, 0x4A,
960/*1C90*/0x12, 0x08, 0x08, 0xE0, 0xF5, 0x4C, 0x85, 0x41,
961 0x49, 0x85, 0x40, 0x4B, 0x22, 0x75, 0x5B, 0x01,
962/*1CA0*/0x90, 0x07, 0x24, 0x12, 0x07, 0x35, 0xE0, 0x54,
963 0x1F, 0xFF, 0xD3, 0x94, 0x02, 0x50, 0x04, 0x8F,
964/*1CB0*/0x58, 0x80, 0x05, 0xEF, 0x24, 0xFE, 0xF5, 0x58,
965 0xEF, 0xC3, 0x94, 0x18, 0x40, 0x05, 0x75, 0x59,
966/*1CC0*/0x18, 0x80, 0x04, 0xEF, 0x04, 0xF5, 0x59, 0x85,
967 0x43, 0x5A, 0xAF, 0x58, 0x7E, 0x00, 0xAD, 0x59,
968/*1CD0*/0x7C, 0x00, 0xAB, 0x5B, 0x7A, 0x00, 0x12, 0x15,
969 0x41, 0xAF, 0x5A, 0x7E, 0x00, 0x12, 0x18, 0x0A,
970/*1CE0*/0xAF, 0x5B, 0x7E, 0x00, 0x02, 0x1A, 0xFF, 0xE5,
971 0xE2, 0x30, 0xE7, 0x0E, 0x12, 0x10, 0x03, 0xC2,
972/*1CF0*/0x30, 0x30, 0x30, 0x03, 0x12, 0x10, 0xFF, 0x20,
973 0x33, 0x28, 0xE5, 0xE7, 0x30, 0xE7, 0x05, 0x12,
974/*1D00*/0x0E, 0xA2, 0x80, 0x0D, 0xE5, 0xFE, 0xC3, 0x94,
975 0x20, 0x50, 0x06, 0x12, 0x0E, 0xA2, 0x43, 0xF9,
976/*1D10*/0x08, 0xE5, 0xF2, 0x30, 0xE7, 0x03, 0x53, 0xF9,
977 0x7F, 0xE5, 0xF1, 0x54, 0x70, 0xD3, 0x94, 0x00,
978/*1D20*/0x50, 0xD8, 0x22, 0x12, 0x0E, 0x04, 0x75, 0x83,
979 0x80, 0xE4, 0xF0, 0xE5, 0x08, 0x44, 0x07, 0x12,
980/*1D30*/0x0D, 0xFD, 0x75, 0x83, 0x84, 0x12, 0x0E, 0x02,
981 0x75, 0x83, 0x86, 0x12, 0x0E, 0x02, 0x75, 0x83,
982/*1D40*/0x8C, 0xE0, 0x54, 0xF3, 0x12, 0x0E, 0x03, 0x75,
983 0x83, 0x8E, 0x12, 0x0E, 0x02, 0x75, 0x83, 0x94,
984/*1D50*/0xE0, 0x54, 0xFB, 0xF0, 0x22, 0x12, 0x07, 0x2A,
985 0x75, 0x83, 0x8E, 0xE4, 0x12, 0x07, 0x29, 0x74,
986/*1D60*/0x01, 0x12, 0x07, 0x29, 0xE4, 0x12, 0x08, 0xBE,
987 0x75, 0x83, 0x8C, 0xE0, 0x44, 0x20, 0x12, 0x08,
988/*1D70*/0xBE, 0xE0, 0x54, 0xDF, 0xF0, 0x74, 0x84, 0x85,
989 0x08, 0x82, 0xF5, 0x83, 0xE0, 0x54, 0x7F, 0xF0,
990/*1D80*/0xE0, 0x44, 0x80, 0xF0, 0x22, 0x75, 0x56, 0x01,
991 0xE4, 0xFD, 0xF5, 0x57, 0xAF, 0x35, 0xFE, 0xFC,
992/*1D90*/0x12, 0x09, 0x15, 0x12, 0x1C, 0x9D, 0x12, 0x1E,
993 0x7A, 0x12, 0x1C, 0x4C, 0xAF, 0x57, 0x7E, 0x00,
994/*1DA0*/0xAD, 0x56, 0x7C, 0x00, 0x12, 0x04, 0x44, 0xAF,
995 0x56, 0x7E, 0x00, 0x02, 0x11, 0xEE, 0x75, 0x56,
996/*1DB0*/0x01, 0xE4, 0xFD, 0xF5, 0x57, 0xAF, 0x35, 0xFE,
997 0xFC, 0x12, 0x09, 0x15, 0x12, 0x1C, 0x9D, 0x12,
998/*1DC0*/0x1E, 0x7A, 0x12, 0x1C, 0x4C, 0xAF, 0x57, 0x7E,
999 0x00, 0xAD, 0x56, 0x7C, 0x00, 0x12, 0x04, 0x44,
1000/*1DD0*/0xAF, 0x56, 0x7E, 0x00, 0x02, 0x11, 0xEE, 0xE4,
1001 0xF5, 0x16, 0x12, 0x0E, 0x44, 0xFE, 0xE5, 0x08,
1002/*1DE0*/0x44, 0x05, 0xFF, 0x12, 0x0E, 0x65, 0x8F, 0x82,
1003 0x8E, 0x83, 0xF0, 0x05, 0x16, 0xE5, 0x16, 0xC3,
1004/*1DF0*/0x94, 0x14, 0x40, 0xE6, 0xE5, 0x08, 0x12, 0x0E,
1005 0x2B, 0xE4, 0xF0, 0x22, 0xE4, 0xF5, 0x58, 0xF5,
1006/*1E00*/0x59, 0xF5, 0x5A, 0xFF, 0xFE, 0xAD, 0x58, 0xFC,
1007 0x12, 0x09, 0x15, 0x7F, 0x04, 0x7E, 0x00, 0xAD,
1008/*1E10*/0x58, 0x7C, 0x00, 0x12, 0x09, 0x15, 0x7F, 0x02,
1009 0x7E, 0x00, 0xAD, 0x58, 0x7C, 0x00, 0x02, 0x09,
1010/*1E20*/0x15, 0xE5, 0x3C, 0x25, 0x3E, 0xFC, 0xE5, 0x42,
1011 0x24, 0x00, 0xFB, 0xE4, 0x33, 0xFA, 0xEC, 0xC3,
1012/*1E30*/0x9B, 0xEA, 0x12, 0x07, 0x6B, 0x40, 0x0B, 0x8C,
1013 0x42, 0xE5, 0x3D, 0x25, 0x3F, 0xF5, 0x41, 0x8F,
1014/*1E40*/0x40, 0x22, 0x12, 0x09, 0x0B, 0x22, 0x74, 0x84,
1015 0xF5, 0x18, 0x85, 0x08, 0x19, 0x85, 0x19, 0x82,
1016/*1E50*/0x85, 0x18, 0x83, 0xE0, 0x54, 0x7F, 0xF0, 0xE0,
1017 0x44, 0x80, 0xF0, 0xE0, 0x44, 0x80, 0xF0, 0x22,
1018/*1E60*/0xEF, 0x4E, 0x70, 0x0B, 0x12, 0x07, 0x2A, 0x75,
1019 0x83, 0xD2, 0xE0, 0x54, 0xDF, 0xF0, 0x22, 0x12,
1020/*1E70*/0x07, 0x2A, 0x75, 0x83, 0xD2, 0xE0, 0x44, 0x20,
1021 0xF0, 0x22, 0x75, 0x58, 0x01, 0x90, 0x07, 0x26,
1022/*1E80*/0x12, 0x07, 0x35, 0xE0, 0x54, 0x3F, 0xF5, 0x41,
1023 0x12, 0x07, 0x32, 0xE0, 0x54, 0x3F, 0xF5, 0x40,
1024/*1E90*/0x22, 0x75, 0x56, 0x02, 0xE4, 0xF5, 0x57, 0x12,
1025 0x1D, 0xFC, 0xAF, 0x57, 0x7E, 0x00, 0xAD, 0x56,
1026/*1EA0*/0x7C, 0x00, 0x02, 0x04, 0x44, 0xE4, 0xF5, 0x42,
1027 0xF5, 0x41, 0xF5, 0x40, 0xF5, 0x38, 0xF5, 0x39,
1028/*1EB0*/0xF5, 0x3A, 0x22, 0xEF, 0x54, 0x07, 0xFF, 0xE5,
1029 0xF9, 0x54, 0xF8, 0x4F, 0xF5, 0xF9, 0x22, 0x7F,
1030/*1EC0*/0x01, 0xE4, 0xFE, 0x0F, 0x0E, 0xBE, 0xFF, 0xFB,
1031 0x22, 0x01, 0x20, 0x00, 0x01, 0x04, 0x20, 0x00,
1032/*1ED0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1033 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1034/*1EE0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1035 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1036/*1EF0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1037 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1038/*1F00*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1039 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1040/*1F10*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1041 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1042/*1F20*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1043 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1044/*1F30*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1045 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1046/*1F40*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1047 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1048/*1F50*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1049 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1050/*1F60*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1051 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1052/*1F70*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1053 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1054/*1F80*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1055 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1056/*1F90*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1057 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1058/*1FA0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1059 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1060/*1FB0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1061 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1062/*1FC0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1063 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1064/*1FD0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1065 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1066/*1FE0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1067 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1068/*1FF0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1069 0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x81
1070};
1071
1072int ipath_sd7220_ib_load(struct ipath_devdata *dd)
1073{
1074 return ipath_sd7220_prog_ld(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
1075 sizeof(ipath_sd7220_ib_img), 0);
1076}
1077
1078int ipath_sd7220_ib_vfy(struct ipath_devdata *dd)
1079{
1080 return ipath_sd7220_prog_vfy(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
1081 sizeof(ipath_sd7220_ib_img), 0);
1082}
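Both entry points hand the same image and length to the generic SERDES programming helpers, presumably differing only in whether the image is written or read back and compared. A caller would plausibly load first and verify afterwards; an illustrative sequence (not taken from this commit, and assuming the usual 0-on-success return convention):

	int ret;

	ret = ipath_sd7220_ib_load(dd);		/* write image to SERDES */
	if (!ret)
		ret = ipath_sd7220_ib_vfy(dd);	/* read back and compare */

with any nonzero ret then treated as a SERDES programming failure.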
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
new file mode 100644
index 000000000000..1974df7a9f78
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -0,0 +1,790 @@
1/*
2 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/spinlock.h>
34
35#include "ipath_kernel.h"
36#include "ipath_verbs.h"
37#include "ipath_common.h"
38
39#define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */
40
41static void vl15_watchdog_enq(struct ipath_devdata *dd)
42{
43 /* ipath_sdma_lock must already be held */
44 if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
45 unsigned long interval = (HZ + 19) / 20;
46 dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
47 add_timer(&dd->ipath_sdma_vl15_timer);
48 }
49}
50
51static void vl15_watchdog_deq(struct ipath_devdata *dd)
52{
53 /* ipath_sdma_lock must already be held */
54 if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
55 unsigned long interval = (HZ + 19) / 20;
56 mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
57 } else {
58 del_timer(&dd->ipath_sdma_vl15_timer);
59 }
60}
61
62static void vl15_watchdog_timeout(unsigned long opaque)
63{
64 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
65
66 if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
67 ipath_dbg("vl15 watchdog timeout - clearing\n");
68 ipath_cancel_sends(dd, 1);
69 ipath_hol_down(dd);
70 } else {
71 ipath_dbg("vl15 watchdog timeout - "
72 "condition already cleared\n");
73 }
74}
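/*
 * The (HZ + 19) / 20 expression used by the two functions above is
 * ceil(HZ / 20): one twentieth of a second in jiffies, rounded up so
 * the interval is never zero. A standalone check (userspace,
 * illustrative only):
 *
 *	for (int hz = 100; hz <= 1000; hz *= 10)
 *		printf("HZ=%d -> %d jiffies\n", hz, (hz + 19) / 20);
 *
 * which prints 5 jiffies at HZ=100 and 50 at HZ=1000 (both 50 ms);
 * at HZ=250 the result is 13 jiffies, about 52 ms.
 */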
75
76static void unmap_desc(struct ipath_devdata *dd, unsigned head)
77{
78 __le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
79 u64 desc[2];
80 dma_addr_t addr;
81 size_t len;
82
83 desc[0] = le64_to_cpu(descqp[0]);
84 desc[1] = le64_to_cpu(descqp[1]);
85
86 addr = (desc[1] << 32) | (desc[0] >> 32);
87 len = (desc[0] >> 14) & (0x7ffULL << 2);
88 dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
89}
90
91/*
 92 * ipath_sdma_lock must be held before calling this.
93 */
94int ipath_sdma_make_progress(struct ipath_devdata *dd)
95{
96 struct list_head *lp = NULL;
97 struct ipath_sdma_txreq *txp = NULL;
98 u16 dmahead;
99 u16 start_idx = 0;
100 int progress = 0;
101
102 if (!list_empty(&dd->ipath_sdma_activelist)) {
103 lp = dd->ipath_sdma_activelist.next;
104 txp = list_entry(lp, struct ipath_sdma_txreq, list);
105 start_idx = txp->start_idx;
106 }
107
108 /*
109 * Read the SDMA head register in order to know that the
110 * interrupt clear has been written to the chip.
111 * Otherwise, we may not get an interrupt for the last
112 * descriptor in the queue.
113 */
114 dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
115 /* sanity check return value for error handling (chip reset, etc.) */
116 if (dmahead >= dd->ipath_sdma_descq_cnt)
117 goto done;
118
119 while (dd->ipath_sdma_descq_head != dmahead) {
120 if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC &&
121 dd->ipath_sdma_descq_head == start_idx) {
122 unmap_desc(dd, dd->ipath_sdma_descq_head);
123 start_idx++;
124 if (start_idx == dd->ipath_sdma_descq_cnt)
125 start_idx = 0;
126 }
127
128 /* increment free count and head */
129 dd->ipath_sdma_descq_removed++;
130 if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
131 dd->ipath_sdma_descq_head = 0;
132
133 if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
134 /* move to notify list */
135 if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
136 vl15_watchdog_deq(dd);
137 list_move_tail(lp, &dd->ipath_sdma_notifylist);
138 if (!list_empty(&dd->ipath_sdma_activelist)) {
139 lp = dd->ipath_sdma_activelist.next;
140 txp = list_entry(lp, struct ipath_sdma_txreq,
141 list);
142 start_idx = txp->start_idx;
143 } else {
144 lp = NULL;
145 txp = NULL;
146 }
147 }
148 progress = 1;
149 }
150
151 if (progress)
152 tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
153
154done:
155 return progress;
156}
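/*
 * A note on the accounting above: the 64-bit added/removed counters
 * updated here and in the send path let the driver compute free
 * descriptor slots without worrying about 16-bit head/tail wrap. A
 * minimal sketch of such a computation (the ipath_sdma_descq_freecnt()
 * helper used later in this file is defined elsewhere and may differ):
 *
 *	unsigned freecnt = descq_cnt - (unsigned)(added - removed) - 1;
 *
 * One slot is kept unused so that a full ring and an empty ring remain
 * distinguishable.
 */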
157
158static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
159{
160 struct ipath_sdma_txreq *txp, *txp_next;
161
162 list_for_each_entry_safe(txp, txp_next, list, list) {
163 list_del_init(&txp->list);
164
165 if (txp->callback)
166 (*txp->callback)(txp->callback_cookie,
167 txp->callback_status);
168 }
169}
170
171static void sdma_notify_taskbody(struct ipath_devdata *dd)
172{
173 unsigned long flags;
174 struct list_head list;
175
176 INIT_LIST_HEAD(&list);
177
178 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
179
180 list_splice_init(&dd->ipath_sdma_notifylist, &list);
181
182 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
183
184 ipath_sdma_notify(dd, &list);
185
186 /*
187 * The IB verbs layer needs to see the callback before getting
188 * the call to ipath_ib_piobufavail() because the callback
189 * handles releasing resources the next send will need.
190 * Otherwise, we could do these calls in
191 * ipath_sdma_make_progress().
192 */
193 ipath_ib_piobufavail(dd->verbs_dev);
194}
195
196static void sdma_notify_task(unsigned long opaque)
197{
198 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
199
200 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
201 sdma_notify_taskbody(dd);
202}
203
204static void dump_sdma_state(struct ipath_devdata *dd)
205{
206 unsigned long reg;
207
208 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
209 ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg);
210
211 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
212 ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg);
213
214 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
215 ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg);
216
217 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
218 ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg);
219
220 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
221 ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg);
222
223 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
224 ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg);
225
226 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
227 ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg);
228}
229
230static void sdma_abort_task(unsigned long opaque)
231{
232 struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
233 u64 status;
234 unsigned long flags;
235
236 if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
237 return;
238
239 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
240
241 status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;
242
243 /* nothing to do */
244 if (status == IPATH_SDMA_ABORT_NONE)
245 goto unlock;
246
247 /* ipath_sdma_abort() is done, waiting for interrupt */
248 if (status == IPATH_SDMA_ABORT_DISARMED) {
249 if (jiffies < dd->ipath_sdma_abort_intr_timeout)
250 goto resched_noprint;
251 /* give up, intr got lost somewhere */
252 ipath_dbg("give up waiting for SDMADISABLED intr\n");
253 __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
254 status = IPATH_SDMA_ABORT_ABORTED;
255 }
256
257 /* everything is stopped, time to clean up and restart */
258 if (status == IPATH_SDMA_ABORT_ABORTED) {
259 struct ipath_sdma_txreq *txp, *txpnext;
260 u64 hwstatus;
261 int notify = 0;
262
263 hwstatus = ipath_read_kreg64(dd,
264 dd->ipath_kregs->kr_senddmastatus);
265
266 if (/* ScoreBoardDrainInProg */
267 test_bit(63, &hwstatus) ||
268 /* AbortInProg */
269 test_bit(62, &hwstatus) ||
270 /* InternalSDmaEnable */
271 test_bit(61, &hwstatus) ||
272 /* ScbEmpty */
273 !test_bit(30, &hwstatus)) {
274 if (dd->ipath_sdma_reset_wait > 0) {
275 /* not done shutting down sdma */
276 --dd->ipath_sdma_reset_wait;
277 goto resched;
278 }
279 ipath_cdbg(VERBOSE, "gave up waiting for quiescent "
280 "status after SDMA reset, continuing\n");
281 dump_sdma_state(dd);
282 }
283
284 /* dequeue all "sent" requests */
285 list_for_each_entry_safe(txp, txpnext,
286 &dd->ipath_sdma_activelist, list) {
287 txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
288 if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
289 vl15_watchdog_deq(dd);
290 list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
291 notify = 1;
292 }
293 if (notify)
294 tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
295
296 /* reset our notion of head and tail */
297 dd->ipath_sdma_descq_tail = 0;
298 dd->ipath_sdma_descq_head = 0;
299 dd->ipath_sdma_head_dma[0] = 0;
300 dd->ipath_sdma_generation = 0;
301 dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;
302
303 /* Reset SendDmaLenGen */
304 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
305 (u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));
306
307 /* done with sdma state for a bit */
308 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
309
310 /*
311 * Don't restart sdma here. Wait until link is up to ACTIVE.
312 * VL15 MADs used to bring the link up use PIO, and multiple
313 * link transitions otherwise cause the sdma engine to be
314 * stopped and started multiple times.
315 * The disable is done here, including the shadow, so the
316 * state is kept consistent.
317 * See ipath_restart_sdma() for the actual starting of sdma.
318 */
319 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
320 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
321 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
322 dd->ipath_sendctrl);
323 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
324 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
325
326 /* make sure I see next message */
327 dd->ipath_sdma_abort_jiffies = 0;
328
329 goto done;
330 }
331
332resched:
333 /*
334 * for now, keep spinning
 335 * JAG - it is bad for the default case to be a loop with no
 336 * state change
337 */
338 if (jiffies > dd->ipath_sdma_abort_jiffies) {
339 ipath_dbg("looping with status 0x%016llx\n",
340 dd->ipath_sdma_status);
341 dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
342 }
343resched_noprint:
344 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
345 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
346 tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
347 return;
348
349unlock:
350 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
351done:
352 return;
353}
354
355/*
356 * This is called from interrupt context.
357 */
358void ipath_sdma_intr(struct ipath_devdata *dd)
359{
360 unsigned long flags;
361
362 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
363
364 (void) ipath_sdma_make_progress(dd);
365
366 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
367}
368
369static int alloc_sdma(struct ipath_devdata *dd)
370{
371 int ret = 0;
372
373 /* Allocate memory for SendDMA descriptor FIFO */
374 dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
375 SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);
376
377 if (!dd->ipath_sdma_descq) {
378 ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
379 "FIFO memory\n");
380 ret = -ENOMEM;
381 goto done;
382 }
383
384 dd->ipath_sdma_descq_cnt =
385 SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);
386
387 /* Allocate memory for DMA of head register to memory */
388 dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
389 PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
390 if (!dd->ipath_sdma_head_dma) {
391 ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
392 ret = -ENOMEM;
393 goto cleanup_descq;
394 }
395 dd->ipath_sdma_head_dma[0] = 0;
396
397 init_timer(&dd->ipath_sdma_vl15_timer);
398 dd->ipath_sdma_vl15_timer.function = vl15_watchdog_timeout;
399 dd->ipath_sdma_vl15_timer.data = (unsigned long)dd;
400 atomic_set(&dd->ipath_sdma_vl15_count, 0);
401
402 goto done;
403
404cleanup_descq:
405 dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
406 (void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
407 dd->ipath_sdma_descq = NULL;
408 dd->ipath_sdma_descq_phys = 0;
409done:
410 return ret;
411}
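/*
 * With SDMA_DESCQ_SZ equal to one 4 KB page and each descriptor two
 * 64-bit words (16 bytes, as implied by the qw[0]/qw[1] accesses in
 * this file), the count computed above is 4096 / 16 = 256 entries,
 * matching the "256 entries per 4KB page" note at the top of the file.
 */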
412
413int setup_sdma(struct ipath_devdata *dd)
414{
415 int ret = 0;
416 unsigned i, n;
417 u64 tmp64;
418 u64 senddmabufmask[3] = { 0 };
419 unsigned long flags;
420
421 ret = alloc_sdma(dd);
422 if (ret)
423 goto done;
424
425 if (!dd->ipath_sdma_descq) {
426 ipath_dev_err(dd, "SendDMA memory not allocated\n");
427 goto done;
428 }
429
430 dd->ipath_sdma_status = 0;
431 dd->ipath_sdma_abort_jiffies = 0;
432 dd->ipath_sdma_generation = 0;
433 dd->ipath_sdma_descq_tail = 0;
434 dd->ipath_sdma_descq_head = 0;
435 dd->ipath_sdma_descq_removed = 0;
436 dd->ipath_sdma_descq_added = 0;
437
438 /* Set SendDmaBase */
439 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
440 dd->ipath_sdma_descq_phys);
441 /* Set SendDmaLenGen */
442 tmp64 = dd->ipath_sdma_descq_cnt;
443 tmp64 |= 1<<18; /* enable generation checking */
444 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
445 /* Set SendDmaTail */
446 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
447 dd->ipath_sdma_descq_tail);
448 /* Set SendDmaHeadAddr */
449 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
450 dd->ipath_sdma_head_phys);
451
452 /* Reserve all the former "kernel" piobufs */
453 n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - dd->ipath_pioreserved;
454 for (i = dd->ipath_lastport_piobuf; i < n; ++i) {
455 unsigned word = i / 64;
456 unsigned bit = i & 63;
457 BUG_ON(word >= 3);
458 senddmabufmask[word] |= 1ULL << bit;
459 }
460 ipath_chg_pioavailkernel(dd, dd->ipath_lastport_piobuf,
461 n - dd->ipath_lastport_piobuf, 0);
462 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
463 senddmabufmask[0]);
464 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
465 senddmabufmask[1]);
466 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
467 senddmabufmask[2]);
468
469 INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
470 INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);
471
472 tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
473 (unsigned long) dd);
474 tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
475 (unsigned long) dd);
476
477 /*
 478 * There is no point in turning SDMA on here, as the link is
 479 * probably not yet ACTIVE. Just mark it RUNNING, enable the
 480 * interrupt, and let ipath_restart_sdma(), on the link
 481 * transition to ACTIVE, actually enable it.
482 */
483 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
484 dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
485 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
486 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
487 __set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
488 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
489
490done:
491 return ret;
492}
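/*
 * The reservation loop above packs piobuf index i into one of three
 * 64-bit mask registers: i / 64 selects the register, i & 63 the bit.
 * A standalone illustration with made-up buffer numbers, reserving
 * buffers 40..129:
 *
 *	u64 mask[3] = { 0, 0, 0 };
 *	unsigned i;
 *
 *	for (i = 40; i < 130; i++)
 *		mask[i / 64] |= 1ULL << (i & 63);
 *
 * leaves bits 40..63 set in mask[0], all of mask[1], and bits 0..1 of
 * mask[2]; three registers cover up to 192 buffers, hence the
 * BUG_ON(word >= 3).
 */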
493
494void teardown_sdma(struct ipath_devdata *dd)
495{
496 struct ipath_sdma_txreq *txp, *txpnext;
497 unsigned long flags;
498 dma_addr_t sdma_head_phys = 0;
499 dma_addr_t sdma_descq_phys = 0;
500 void *sdma_descq = NULL;
501 void *sdma_head_dma = NULL;
502
503 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
504 __clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
505 __set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
506 __set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
507 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
508
509 tasklet_kill(&dd->ipath_sdma_abort_task);
510 tasklet_kill(&dd->ipath_sdma_notify_task);
511
512 /* turn off sdma */
513 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
514 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
515 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
516 dd->ipath_sendctrl);
517 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
518 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
519
520 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
521 /* dequeue all "sent" requests */
522 list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
523 list) {
524 txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
525 if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
526 vl15_watchdog_deq(dd);
527 list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
528 }
529 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
530
531 sdma_notify_taskbody(dd);
532
533 del_timer_sync(&dd->ipath_sdma_vl15_timer);
534
535 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
536
537 dd->ipath_sdma_abort_jiffies = 0;
538
539 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
540 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
541 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
542 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
543 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
544 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
545 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);
546
547 if (dd->ipath_sdma_head_dma) {
548 sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
549 sdma_head_phys = dd->ipath_sdma_head_phys;
550 dd->ipath_sdma_head_dma = NULL;
551 dd->ipath_sdma_head_phys = 0;
552 }
553
554 if (dd->ipath_sdma_descq) {
555 sdma_descq = dd->ipath_sdma_descq;
556 sdma_descq_phys = dd->ipath_sdma_descq_phys;
557 dd->ipath_sdma_descq = NULL;
558 dd->ipath_sdma_descq_phys = 0;
559 }
560
561 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
562
563 if (sdma_head_dma)
564 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
565 sdma_head_dma, sdma_head_phys);
566
567 if (sdma_descq)
568 dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
569 sdma_descq, sdma_descq_phys);
570}
571
572/*
573 * [Re]start SDMA, if we use it, and it's not already OK.
574 * This is called on transition to link ACTIVE, either the first or
575 * subsequent times.
576 */
577void ipath_restart_sdma(struct ipath_devdata *dd)
578{
579 unsigned long flags;
580 int needed = 1;
581
582 if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
583 goto bail;
584
585 /*
 586 * First, make sure we actually should restart: check that we
 587 * are "RUNNING" (not in teardown) and that we are not
 588 * "SHUTDOWN"
589 */
590 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
591 if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)
592 || test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
593 needed = 0;
594 else {
595 __clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
596 __clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
597 __clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
598 }
599 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
600 if (!needed) {
601 ipath_dbg("invalid attempt to restart SDMA, status 0x%016llx\n",
602 dd->ipath_sdma_status);
603 goto bail;
604 }
605 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
606 /*
 607 * First clear, just to be safe; the chip only acts on the
 608 * enable bit's 0->1 transition
609 */
610 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
611 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
612 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
613 dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
614 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
615 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
616 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
617
618bail:
619 return;
620}
621
622static inline void make_sdma_desc(struct ipath_devdata *dd,
623 u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset)
624{
625 WARN_ON(addr & 3);
626 /* SDmaPhyAddr[47:32] */
627 sdmadesc[1] = addr >> 32;
628 /* SDmaPhyAddr[31:0] */
629 sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
630 /* SDmaGeneration[1:0] */
631 sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
632 /* SDmaDwordCount[10:0] */
633 sdmadesc[0] |= (dwlen & 0x7ffULL) << 16;
634 /* SDmaBufOffset[12:2] */
635 sdmadesc[0] |= dwoffset & 0x7ffULL;
636}
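/*
 * Worked example of the packing above, using made-up values
 * (generation 2, bus address 0x12345678a0, 64 dwords, offset 0):
 *
 *	sdmadesc[1] = 0x12345678a0 >> 32          = 0x0000000000000012
 *	sdmadesc[0] = 0x345678a0 << 32            = 0x345678a000000000
 *	            | (2 & 3) << 30               | 0x0000000080000000
 *	            | (64 & 0x7ff) << 16          | 0x0000000000400000
 *	            | (0 & 0x7ff)                 = 0x345678a080400000
 */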
637
638/*
639 * This function queues one IB packet onto the send DMA queue per call.
640 * The caller is responsible for checking:
641 * 1) The number of send DMA descriptor entries is less than the size of
642 * the descriptor queue.
643 * 2) The IB SGE addresses and lengths are 32-bit aligned
644 * (except possibly the last SGE's length)
645 * 3) The SGE addresses are suitable for passing to dma_map_single().
646 */
647int ipath_sdma_verbs_send(struct ipath_devdata *dd,
648 struct ipath_sge_state *ss, u32 dwords,
649 struct ipath_verbs_txreq *tx)
650{
651
652 unsigned long flags;
653 struct ipath_sge *sge;
654 int ret = 0;
655 u16 tail;
656 __le64 *descqp;
657 u64 sdmadesc[2];
658 u32 dwoffset;
659 dma_addr_t addr;
660
661 if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) {
662 ipath_dbg("packet size %X > ibmax %X, fail\n",
663 tx->map_len + (dwords<<2), dd->ipath_ibmaxlen);
664 ret = -EMSGSIZE;
665 goto fail;
666 }
667
668 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
669
670retry:
671 if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
672 ret = -EBUSY;
673 goto unlock;
674 }
675
676 if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
677 if (ipath_sdma_make_progress(dd))
678 goto retry;
679 ret = -ENOBUFS;
680 goto unlock;
681 }
682
683 addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
684 tx->map_len, DMA_TO_DEVICE);
685 if (dma_mapping_error(addr)) {
686 ret = -EIO;
687 goto unlock;
688 }
689
690 dwoffset = tx->map_len >> 2;
691 make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);
692
693 /* SDmaFirstDesc */
694 sdmadesc[0] |= 1ULL << 12;
695 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
696 sdmadesc[0] |= 1ULL << 14; /* SDmaUseLargeBuf */
697
698 /* write to the descq */
699 tail = dd->ipath_sdma_descq_tail;
700 descqp = &dd->ipath_sdma_descq[tail].qw[0];
701 *descqp++ = cpu_to_le64(sdmadesc[0]);
702 *descqp++ = cpu_to_le64(sdmadesc[1]);
703
704 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)
705 tx->txreq.start_idx = tail;
706
707 /* increment the tail */
708 if (++tail == dd->ipath_sdma_descq_cnt) {
709 tail = 0;
710 descqp = &dd->ipath_sdma_descq[0].qw[0];
711 ++dd->ipath_sdma_generation;
712 }
713
714 sge = &ss->sge;
715 while (dwords) {
716 u32 dw;
717 u32 len;
718
719 len = dwords << 2;
720 if (len > sge->length)
721 len = sge->length;
722 if (len > sge->sge_length)
723 len = sge->sge_length;
724 BUG_ON(len == 0);
725 dw = (len + 3) >> 2;
726 addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
727 DMA_TO_DEVICE);
728 make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
729 /* SDmaUseLargeBuf has to be set in every descriptor */
730 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
731 sdmadesc[0] |= 1ULL << 14;
732 /* write to the descq */
733 *descqp++ = cpu_to_le64(sdmadesc[0]);
734 *descqp++ = cpu_to_le64(sdmadesc[1]);
735
736 /* increment the tail */
737 if (++tail == dd->ipath_sdma_descq_cnt) {
738 tail = 0;
739 descqp = &dd->ipath_sdma_descq[0].qw[0];
740 ++dd->ipath_sdma_generation;
741 }
742 sge->vaddr += len;
743 sge->length -= len;
744 sge->sge_length -= len;
745 if (sge->sge_length == 0) {
746 if (--ss->num_sge)
747 *sge = *ss->sg_list++;
748 } else if (sge->length == 0 && sge->mr != NULL) {
749 if (++sge->n >= IPATH_SEGSZ) {
750 if (++sge->m >= sge->mr->mapsz)
751 break;
752 sge->n = 0;
753 }
754 sge->vaddr =
755 sge->mr->map[sge->m]->segs[sge->n].vaddr;
756 sge->length =
757 sge->mr->map[sge->m]->segs[sge->n].length;
758 }
759
760 dwoffset += dw;
761 dwords -= dw;
762 }
763
764 if (!tail)
765 descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
766 descqp -= 2;
767 /* SDmaLastDesc */
768 descqp[0] |= __constant_cpu_to_le64(1ULL << 11);
769 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
770 /* SDmaIntReq */
771 descqp[0] |= __constant_cpu_to_le64(1ULL << 15);
772 }
773
774 /* Commit writes to memory and advance the tail on the chip */
775 wmb();
776 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
777
778 tx->txreq.next_descq_idx = tail;
779 tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
780 dd->ipath_sdma_descq_tail = tail;
781 dd->ipath_sdma_descq_added += tx->txreq.sg_count;
782 list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
783 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
784 vl15_watchdog_enq(dd);
785
786unlock:
787 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
788fail:
789 return ret;
790}
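
The contract spelled out above ipath_sdma_verbs_send() leaves retry policy to the caller. A minimal caller-side sketch, assuming a txreq already prepared by the verbs layer; the error codes are the ones this function returns, but the fallback shown is illustrative only:

	/* illustrative caller: -EBUSY and -ENOBUFS mean "try again later";
	 * descriptors are released through the txreq callback path */
	static int example_verbs_send(struct ipath_devdata *dd,
				      struct ipath_sge_state *ss, u32 dwords,
				      struct ipath_verbs_txreq *tx)
	{
		int ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);

		if (ret == -EBUSY || ret == -ENOBUFS)
			; /* defer: requeue and wait for a completion callback */
		return ret;
	}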
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index f772102e4713..e3d80ca84c1a 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -245,7 +245,8 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
245 sizeof(offset_addr)); 245 sizeof(offset_addr));
246 if (ret) 246 if (ret)
247 goto bail_free; 247 goto bail_free;
248 udata->outbuf = (void __user *) offset_addr; 248 udata->outbuf =
249 (void __user *) (unsigned long) offset_addr;
249 ret = ib_copy_to_udata(udata, &offset, 250 ret = ib_copy_to_udata(udata, &offset,
250 sizeof(offset)); 251 sizeof(offset));
251 if (ret) 252 if (ret)
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
index d2725cd11bdc..c8e3d65f0de8 100644
--- a/drivers/infiniband/hw/ipath/ipath_stats.c
+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -136,6 +136,7 @@ static void ipath_qcheck(struct ipath_devdata *dd)
136 struct ipath_portdata *pd = dd->ipath_pd[0]; 136 struct ipath_portdata *pd = dd->ipath_pd[0];
137 size_t blen = 0; 137 size_t blen = 0;
138 char buf[128]; 138 char buf[128];
139 u32 hdrqtail;
139 140
140 *buf = 0; 141 *buf = 0;
141 if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) { 142 if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {
@@ -174,17 +175,18 @@ static void ipath_qcheck(struct ipath_devdata *dd)
174 if (blen) 175 if (blen)
175 ipath_dbg("%s\n", buf); 176 ipath_dbg("%s\n", buf);
176 177
177 if (pd->port_head != (u32) 178 hdrqtail = ipath_get_hdrqtail(pd);
178 le64_to_cpu(*dd->ipath_hdrqtailptr)) { 179 if (pd->port_head != hdrqtail) {
179 if (dd->ipath_lastport0rcv_cnt == 180 if (dd->ipath_lastport0rcv_cnt ==
180 ipath_stats.sps_port0pkts) { 181 ipath_stats.sps_port0pkts) {
181 ipath_cdbg(PKT, "missing rcv interrupts? " 182 ipath_cdbg(PKT, "missing rcv interrupts? "
182 "port0 hd=%llx tl=%x; port0pkts %llx\n", 183 "port0 hd=%x tl=%x; port0pkts %llx; write"
183 (unsigned long long) 184 " hd (w/intr)\n",
184 le64_to_cpu(*dd->ipath_hdrqtailptr), 185 pd->port_head, hdrqtail,
185 pd->port_head,
186 (unsigned long long) 186 (unsigned long long)
187 ipath_stats.sps_port0pkts); 187 ipath_stats.sps_port0pkts);
188 ipath_write_ureg(dd, ur_rcvhdrhead, hdrqtail |
189 dd->ipath_rhdrhead_intr_off, pd->port_port);
188 } 190 }
189 dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts; 191 dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
190 } 192 }
@@ -290,11 +292,11 @@ void ipath_get_faststats(unsigned long opaque)
290 && time_after(jiffies, dd->ipath_unmasktime)) { 292 && time_after(jiffies, dd->ipath_unmasktime)) {
291 char ebuf[256]; 293 char ebuf[256];
292 int iserr; 294 int iserr;
293 iserr = ipath_decode_err(ebuf, sizeof ebuf, 295 iserr = ipath_decode_err(dd, ebuf, sizeof ebuf,
294 dd->ipath_maskederrs); 296 dd->ipath_maskederrs);
295 if (dd->ipath_maskederrs & 297 if (dd->ipath_maskederrs &
296 ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL | 298 ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
297 INFINIPATH_E_PKTERRS )) 299 INFINIPATH_E_PKTERRS))
298 ipath_dev_err(dd, "Re-enabling masked errors " 300 ipath_dev_err(dd, "Re-enabling masked errors "
299 "(%s)\n", ebuf); 301 "(%s)\n", ebuf);
300 else { 302 else {
@@ -306,17 +308,18 @@ void ipath_get_faststats(unsigned long opaque)
306 * level. 308 * level.
307 */ 309 */
308 if (iserr) 310 if (iserr)
309 ipath_dbg("Re-enabling queue full errors (%s)\n", 311 ipath_dbg(
310 ebuf); 312 "Re-enabling queue full errors (%s)\n",
313 ebuf);
311 else 314 else
312 ipath_cdbg(ERRPKT, "Re-enabling packet" 315 ipath_cdbg(ERRPKT, "Re-enabling packet"
313 " problem interrupt (%s)\n", ebuf); 316 " problem interrupt (%s)\n", ebuf);
314 } 317 }
315 318
316 /* re-enable masked errors */ 319 /* re-enable masked errors */
317 dd->ipath_errormask |= dd->ipath_maskederrs; 320 dd->ipath_errormask |= dd->ipath_maskederrs;
318 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 321 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
319 dd->ipath_errormask); 322 dd->ipath_errormask);
320 dd->ipath_maskederrs = 0; 323 dd->ipath_maskederrs = 0;
321 } 324 }
322 325
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index 56dfc8a2344c..a6c8efbdc0c9 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -34,6 +34,7 @@
34#include <linux/ctype.h> 34#include <linux/ctype.h>
35 35
36#include "ipath_kernel.h" 36#include "ipath_kernel.h"
37#include "ipath_verbs.h"
37#include "ipath_common.h" 38#include "ipath_common.h"
38 39
39/** 40/**
@@ -163,6 +164,15 @@ static ssize_t show_boardversion(struct device *dev,
163 return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_boardversion); 164 return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_boardversion);
164} 165}
165 166
167static ssize_t show_localbus_info(struct device *dev,
168 struct device_attribute *attr,
169 char *buf)
170{
171 struct ipath_devdata *dd = dev_get_drvdata(dev);
172 /* The string printed here is already newline-terminated. */
173 return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_lbus_info);
174}
175
166static ssize_t show_lmc(struct device *dev, 176static ssize_t show_lmc(struct device *dev,
167 struct device_attribute *attr, 177 struct device_attribute *attr,
168 char *buf) 178 char *buf)
@@ -311,6 +321,8 @@ static ssize_t store_guid(struct device *dev,
311 321
312 dd->ipath_guid = new_guid; 322 dd->ipath_guid = new_guid;
313 dd->ipath_nguid = 1; 323 dd->ipath_nguid = 1;
324 if (dd->verbs_dev)
325 dd->verbs_dev->ibdev.node_guid = new_guid;
314 326
315 ret = strlen(buf); 327 ret = strlen(buf);
316 goto bail; 328 goto bail;
@@ -919,21 +931,21 @@ static ssize_t store_rx_polinv_enb(struct device *dev,
919 u16 val; 931 u16 val;
920 932
921 ret = ipath_parse_ushort(buf, &val); 933 ret = ipath_parse_ushort(buf, &val);
922 if (ret < 0 || val > 1) 934 if (ret >= 0 && val > 1) {
923 goto invalid; 935 ipath_dev_err(dd,
936 "attempt to set invalid Rx Polarity (enable)\n");
937 ret = -EINVAL;
938 goto bail;
939 }
924 940
925 r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val); 941 r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val);
926 if (r < 0) { 942 if (r < 0)
927 ret = r; 943 ret = r;
928 goto bail;
929 }
930 944
931 goto bail;
932invalid:
933 ipath_dev_err(dd, "attempt to set invalid Rx Polarity (enable)\n");
934bail: 945bail:
935 return ret; 946 return ret;
936} 947}
948
937/* 949/*
938 * Get/Set RX lane-reversal enable. 0=no, 1=yes. 950 * Get/Set RX lane-reversal enable. 0=no, 1=yes.
939 */ 951 */
@@ -988,6 +1000,75 @@ static struct attribute_group driver_attr_group = {
988 .attrs = driver_attributes 1000 .attrs = driver_attributes
989}; 1001};
990 1002
1003static ssize_t store_tempsense(struct device *dev,
1004 struct device_attribute *attr,
1005 const char *buf,
1006 size_t count)
1007{
1008 struct ipath_devdata *dd = dev_get_drvdata(dev);
1009 int ret, stat;
1010 u16 val;
1011
1012 ret = ipath_parse_ushort(buf, &val);
1013 if (ret <= 0) {
1014 ipath_dev_err(dd, "attempt to set invalid tempsense config\n");
1015 goto bail;
1016 }
1017 /* If anything but the highest limit, enable T_CRIT_A "interrupt" */
1018 stat = ipath_tempsense_write(dd, 9, (val == 0x7f7f) ? 0x80 : 0);
1019 if (stat) {
1020 ipath_dev_err(dd, "Unable to set tempsense config\n");
1021 ret = -1;
1022 goto bail;
1023 }
1024 stat = ipath_tempsense_write(dd, 0xB, (u8) (val & 0xFF));
1025 if (stat) {
1026 ipath_dev_err(dd, "Unable to set local Tcrit\n");
1027 ret = -1;
1028 goto bail;
1029 }
1030 stat = ipath_tempsense_write(dd, 0xD, (u8) (val >> 8));
1031 if (stat) {
1032 ipath_dev_err(dd, "Unable to set remote Tcrit\n");
1033 ret = -1;
1034 goto bail;
1035 }
1036
1037bail:
1038 return ret;
1039}
1040
1041/*
 1042 * dump tempsense regs, in decimal, to ease use from shell scripts.
1043 */
1044static ssize_t show_tempsense(struct device *dev,
1045 struct device_attribute *attr,
1046 char *buf)
1047{
1048 struct ipath_devdata *dd = dev_get_drvdata(dev);
1049 int ret;
1050 int idx;
1051 u8 regvals[8];
1052
1053 ret = -ENXIO;
1054 for (idx = 0; idx < 8; ++idx) {
1055 if (idx == 6)
1056 continue;
1057 ret = ipath_tempsense_read(dd, idx);
1058 if (ret < 0)
1059 break;
1060 regvals[idx] = ret;
1061 }
1062 if (idx == 8)
1063 ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
1064 *(signed char *)(regvals),
1065 *(signed char *)(regvals + 1),
1066 regvals[2], regvals[3],
1067 *(signed char *)(regvals + 5),
1068 *(signed char *)(regvals + 7));
1069 return ret;
1070}
1071
991struct attribute_group *ipath_driver_attr_groups[] = { 1072struct attribute_group *ipath_driver_attr_groups[] = {
992 &driver_attr_group, 1073 &driver_attr_group,
993 NULL, 1074 NULL,
@@ -1011,10 +1092,13 @@ static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
1011static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv); 1092static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
1012static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override); 1093static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override);
1013static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL); 1094static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
1095static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
1014static DEVICE_ATTR(jint_max_packets, S_IWUSR | S_IRUGO, 1096static DEVICE_ATTR(jint_max_packets, S_IWUSR | S_IRUGO,
1015 show_jint_max_packets, store_jint_max_packets); 1097 show_jint_max_packets, store_jint_max_packets);
1016static DEVICE_ATTR(jint_idle_ticks, S_IWUSR | S_IRUGO, 1098static DEVICE_ATTR(jint_idle_ticks, S_IWUSR | S_IRUGO,
1017 show_jint_idle_ticks, store_jint_idle_ticks); 1099 show_jint_idle_ticks, store_jint_idle_ticks);
1100static DEVICE_ATTR(tempsense, S_IWUSR | S_IRUGO,
1101 show_tempsense, store_tempsense);
1018 1102
1019static struct attribute *dev_attributes[] = { 1103static struct attribute *dev_attributes[] = {
1020 &dev_attr_guid.attr, 1104 &dev_attr_guid.attr,
@@ -1034,6 +1118,8 @@ static struct attribute *dev_attributes[] = {
1034 &dev_attr_rx_pol_inv.attr, 1118 &dev_attr_rx_pol_inv.attr,
1035 &dev_attr_led_override.attr, 1119 &dev_attr_led_override.attr,
1036 &dev_attr_logged_errors.attr, 1120 &dev_attr_logged_errors.attr,
1121 &dev_attr_tempsense.attr,
1122 &dev_attr_localbus_info.attr,
1037 NULL 1123 NULL
1038}; 1124};
1039 1125
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index 2dd8de20d221..bfe8926b5514 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -94,7 +94,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
94 qp->s_state = 94 qp->s_state =
95 OP(SEND_ONLY_WITH_IMMEDIATE); 95 OP(SEND_ONLY_WITH_IMMEDIATE);
96 /* Immediate data comes after the BTH */ 96 /* Immediate data comes after the BTH */
97 ohdr->u.imm_data = wqe->wr.imm_data; 97 ohdr->u.imm_data = wqe->wr.ex.imm_data;
98 hwords += 1; 98 hwords += 1;
99 } 99 }
100 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 100 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -123,7 +123,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
123 qp->s_state = 123 qp->s_state =
124 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); 124 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
125 /* Immediate data comes after the RETH */ 125 /* Immediate data comes after the RETH */
126 ohdr->u.rc.imm_data = wqe->wr.imm_data; 126 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
127 hwords += 1; 127 hwords += 1;
128 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 128 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
129 bth0 |= 1 << 23; 129 bth0 |= 1 << 23;
@@ -152,7 +152,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
152 else { 152 else {
153 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); 153 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
154 /* Immediate data comes after the BTH */ 154 /* Immediate data comes after the BTH */
155 ohdr->u.imm_data = wqe->wr.imm_data; 155 ohdr->u.imm_data = wqe->wr.ex.imm_data;
156 hwords += 1; 156 hwords += 1;
157 } 157 }
158 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 158 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -177,7 +177,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
177 qp->s_state = 177 qp->s_state =
178 OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); 178 OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
179 /* Immediate data comes after the BTH */ 179 /* Immediate data comes after the BTH */
180 ohdr->u.imm_data = wqe->wr.imm_data; 180 ohdr->u.imm_data = wqe->wr.ex.imm_data;
181 hwords += 1; 181 hwords += 1;
182 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 182 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
183 bth0 |= 1 << 23; 183 bth0 |= 1 << 23;
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index de67eed08ed0..8b6a261c89e3 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -95,7 +95,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
95 95
96 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 96 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
97 wc.wc_flags = IB_WC_WITH_IMM; 97 wc.wc_flags = IB_WC_WITH_IMM;
98 wc.imm_data = swqe->wr.imm_data; 98 wc.imm_data = swqe->wr.ex.imm_data;
99 } else { 99 } else {
100 wc.wc_flags = 0; 100 wc.wc_flags = 0;
101 wc.imm_data = 0; 101 wc.imm_data = 0;
@@ -303,6 +303,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
303 qp->s_hdrwords = 7; 303 qp->s_hdrwords = 7;
304 qp->s_cur_size = wqe->length; 304 qp->s_cur_size = wqe->length;
305 qp->s_cur_sge = &qp->s_sge; 305 qp->s_cur_sge = &qp->s_sge;
306 qp->s_dmult = ah_attr->static_rate;
306 qp->s_wqe = wqe; 307 qp->s_wqe = wqe;
307 qp->s_sge.sge = wqe->sg_list[0]; 308 qp->s_sge.sge = wqe->sg_list[0];
308 qp->s_sge.sg_list = wqe->sg_list + 1; 309 qp->s_sge.sg_list = wqe->sg_list + 1;
@@ -326,7 +327,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
326 } 327 }
327 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 328 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
328 qp->s_hdrwords++; 329 qp->s_hdrwords++;
329 ohdr->u.ud.imm_data = wqe->wr.imm_data; 330 ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
330 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24; 331 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
331 } else 332 } else
332 bth0 = IB_OPCODE_UD_SEND_ONLY << 24; 333 bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
new file mode 100644
index 000000000000..86e016916cd1
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
@@ -0,0 +1,879 @@
1/*
2 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/mm.h>
33#include <linux/types.h>
34#include <linux/device.h>
35#include <linux/dmapool.h>
36#include <linux/slab.h>
37#include <linux/list.h>
38#include <linux/highmem.h>
39#include <linux/io.h>
40#include <linux/uio.h>
41#include <linux/rbtree.h>
42#include <linux/spinlock.h>
43#include <linux/delay.h>
44
45#include "ipath_kernel.h"
46#include "ipath_user_sdma.h"
47
48/* minimum size of header */
49#define IPATH_USER_SDMA_MIN_HEADER_LENGTH 64
50/* expected size of headers (for dma_pool) */
51#define IPATH_USER_SDMA_EXP_HEADER_LENGTH 64
52/* length mask in PBC (lower 11 bits) */
53#define IPATH_PBC_LENGTH_MASK ((1 << 11) - 1)
54
55struct ipath_user_sdma_pkt {
56 u8 naddr; /* dimension of addr (1..3) ... */
57 u32 counter; /* sdma pkts queued counter for this entry */
58 u64 added; /* global descq number of entries */
59
60 struct {
61 u32 offset; /* offset for kvaddr, addr */
62 u32 length; /* length in page */
63 u8 put_page; /* should we put_page? */
64 u8 dma_mapped; /* is page dma_mapped? */
65 struct page *page; /* may be NULL (coherent mem) */
66 void *kvaddr; /* FIXME: only for pio hack */
67 dma_addr_t addr;
68 } addr[4]; /* max pages, any more and we coalesce */
69 struct list_head list; /* list element */
70};
71
72struct ipath_user_sdma_queue {
73 /*
74 * pkts sent to dma engine are queued on this
 75 * list head. the elements of this list are of
 76 * type struct ipath_user_sdma_pkt...
77 */
78 struct list_head sent;
79
80 /* headers with expected length are allocated from here... */
81 char header_cache_name[64];
82 struct dma_pool *header_cache;
83
84 /* packets are allocated from the slab cache... */
85 char pkt_slab_name[64];
86 struct kmem_cache *pkt_slab;
87
88 /* as packets go on the queued queue, they are counted... */
89 u32 counter;
90 u32 sent_counter;
91
92 /* dma page table */
93 struct rb_root dma_pages_root;
94
95 /* protect everything above... */
96 struct mutex lock;
97};
98
99struct ipath_user_sdma_queue *
100ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
101{
102 struct ipath_user_sdma_queue *pq =
103 kmalloc(sizeof(struct ipath_user_sdma_queue), GFP_KERNEL);
104
105 if (!pq)
106 goto done;
107
108 pq->counter = 0;
109 pq->sent_counter = 0;
110 INIT_LIST_HEAD(&pq->sent);
111
112 mutex_init(&pq->lock);
113
114 snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
115 "ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
116 pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
117 sizeof(struct ipath_user_sdma_pkt),
118 0, 0, NULL);
119
120 if (!pq->pkt_slab)
121 goto err_kfree;
122
123 snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
124 "ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
125 pq->header_cache = dma_pool_create(pq->header_cache_name,
126 dev,
127 IPATH_USER_SDMA_EXP_HEADER_LENGTH,
128 4, 0);
129 if (!pq->header_cache)
130 goto err_slab;
131
132 pq->dma_pages_root = RB_ROOT;
133
134 goto done;
135
136err_slab:
137 kmem_cache_destroy(pq->pkt_slab);
138err_kfree:
139 kfree(pq);
140 pq = NULL;
141
142done:
143 return pq;
144}
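/*
 * A minimal sketch of the expected lifecycle around this constructor,
 * using the drain/destroy helpers defined later in this file (the
 * unit/port/sport values are illustrative):
 *
 *	struct ipath_user_sdma_queue *pq =
 *		ipath_user_sdma_queue_create(&dd->pcidev->dev, 0, 1, 0);
 *	if (!pq)
 *		return -ENOMEM;
 *	... submit packets ...
 *	ipath_user_sdma_queue_drain(dd, pq);
 *	ipath_user_sdma_queue_destroy(pq);
 */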
145
146static void ipath_user_sdma_init_frag(struct ipath_user_sdma_pkt *pkt,
147 int i, size_t offset, size_t len,
148 int put_page, int dma_mapped,
149 struct page *page,
150 void *kvaddr, dma_addr_t dma_addr)
151{
152 pkt->addr[i].offset = offset;
153 pkt->addr[i].length = len;
154 pkt->addr[i].put_page = put_page;
155 pkt->addr[i].dma_mapped = dma_mapped;
156 pkt->addr[i].page = page;
157 pkt->addr[i].kvaddr = kvaddr;
158 pkt->addr[i].addr = dma_addr;
159}
160
161static void ipath_user_sdma_init_header(struct ipath_user_sdma_pkt *pkt,
162 u32 counter, size_t offset,
163 size_t len, int dma_mapped,
164 struct page *page,
165 void *kvaddr, dma_addr_t dma_addr)
166{
167 pkt->naddr = 1;
168 pkt->counter = counter;
169 ipath_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
170 kvaddr, dma_addr);
171}
172
 173/* we have too many pages in the iovec; coalesce to a single page */
174static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
175 struct ipath_user_sdma_pkt *pkt,
176 const struct iovec *iov,
177 unsigned long niov) {
178 int ret = 0;
179 struct page *page = alloc_page(GFP_KERNEL);
180 void *mpage_save;
181 char *mpage;
182 int i;
183 int len = 0;
184 dma_addr_t dma_addr;
185
186 if (!page) {
187 ret = -ENOMEM;
188 goto done;
189 }
190
191 mpage = kmap(page);
192 mpage_save = mpage;
193 for (i = 0; i < niov; i++) {
194 int cfur;
195
196 cfur = copy_from_user(mpage,
197 iov[i].iov_base, iov[i].iov_len);
198 if (cfur) {
199 ret = -EFAULT;
200 goto free_unmap;
201 }
202
203 mpage += iov[i].iov_len;
204 len += iov[i].iov_len;
205 }
206
207 dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
208 DMA_TO_DEVICE);
209 if (dma_mapping_error(dma_addr)) {
210 ret = -ENOMEM;
211 goto free_unmap;
212 }
213
214 ipath_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
215 dma_addr);
216 pkt->naddr = 2;
217
218 goto done;
219
220free_unmap:
221 kunmap(page);
222 __free_page(page);
223done:
224 return ret;
225}
226
227/* how many pages in this iovec element? */
228static int ipath_user_sdma_num_pages(const struct iovec *iov)
229{
230 const unsigned long addr = (unsigned long) iov->iov_base;
231 const unsigned long len = iov->iov_len;
232 const unsigned long spage = addr & PAGE_MASK;
233 const unsigned long epage = (addr + len - 1) & PAGE_MASK;
234
235 return 1 + ((epage - spage) >> PAGE_SHIFT);
236}
237
 238/* truncate length to page boundary */
239static int ipath_user_sdma_page_length(unsigned long addr, unsigned long len)
240{
241 const unsigned long offset = addr & ~PAGE_MASK;
242
243 return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
244}
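/*
 * Worked example for the two helpers above, assuming 4 KB pages: an
 * iovec element at addr 0x1000f00 with len 0x300 starts 0xf00 into its
 * page, so it spans two pages (epage - spage == PAGE_SIZE), and the
 * first fragment is truncated to PAGE_SIZE - 0xf00 = 0x100 bytes,
 * leaving 0x200 bytes on the second page.
 */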
245
246static void ipath_user_sdma_free_pkt_frag(struct device *dev,
247 struct ipath_user_sdma_queue *pq,
248 struct ipath_user_sdma_pkt *pkt,
249 int frag)
250{
251 const int i = frag;
252
253 if (pkt->addr[i].page) {
254 if (pkt->addr[i].dma_mapped)
255 dma_unmap_page(dev,
256 pkt->addr[i].addr,
257 pkt->addr[i].length,
258 DMA_TO_DEVICE);
259
260 if (pkt->addr[i].kvaddr)
261 kunmap(pkt->addr[i].page);
262
263 if (pkt->addr[i].put_page)
264 put_page(pkt->addr[i].page);
265 else
266 __free_page(pkt->addr[i].page);
267 } else if (pkt->addr[i].kvaddr)
268 /* free coherent mem from cache... */
269 dma_pool_free(pq->header_cache,
270 pkt->addr[i].kvaddr, pkt->addr[i].addr);
271}
272
273/* return number of pages pinned... */
274static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
275 struct ipath_user_sdma_pkt *pkt,
276 unsigned long addr, int tlen, int npages)
277{
278 struct page *pages[2];
279 int j;
280 int ret;
281
282 ret = get_user_pages(current, current->mm, addr,
283 npages, 0, 1, pages, NULL);
284
285 if (ret != npages) {
286 int i;
287
288 for (i = 0; i < ret; i++)
289 put_page(pages[i]);
290
291 ret = -ENOMEM;
292 goto done;
293 }
294
295 for (j = 0; j < npages; j++) {
296 /* map the pages... */
297 const int flen =
298 ipath_user_sdma_page_length(addr, tlen);
299 dma_addr_t dma_addr =
300 dma_map_page(&dd->pcidev->dev,
301 pages[j], 0, flen, DMA_TO_DEVICE);
302 unsigned long fofs = addr & ~PAGE_MASK;
303
304 if (dma_mapping_error(dma_addr)) {
305 ret = -ENOMEM;
306 goto done;
307 }
308
309 ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
310 pages[j], kmap(pages[j]),
311 dma_addr);
312
313 pkt->naddr++;
314 addr += flen;
315 tlen -= flen;
316 }
317
318done:
319 return ret;
320}
321
322static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd,
323 struct ipath_user_sdma_queue *pq,
324 struct ipath_user_sdma_pkt *pkt,
325 const struct iovec *iov,
326 unsigned long niov)
327{
328 int ret = 0;
329 unsigned long idx;
330
331 for (idx = 0; idx < niov; idx++) {
332 const int npages = ipath_user_sdma_num_pages(iov + idx);
333 const unsigned long addr = (unsigned long) iov[idx].iov_base;
334
335 ret = ipath_user_sdma_pin_pages(dd, pkt,
336 addr, iov[idx].iov_len,
337 npages);
338 if (ret < 0)
339 goto free_pkt;
340 }
341
342 goto done;
343
344free_pkt:
345 for (idx = 0; idx < pkt->naddr; idx++)
346 ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
347
348done:
349 return ret;
350}
351
352static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd,
353 struct ipath_user_sdma_queue *pq,
354 struct ipath_user_sdma_pkt *pkt,
355 const struct iovec *iov,
356 unsigned long niov, int npages)
357{
358 int ret = 0;
359
360 if (npages >= ARRAY_SIZE(pkt->addr))
361 ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov);
362 else
363 ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
364
365 return ret;
366}
367
 368/* free a packet list (callers needing the last packet's counter read it first) */
369static void ipath_user_sdma_free_pkt_list(struct device *dev,
370 struct ipath_user_sdma_queue *pq,
371 struct list_head *list)
372{
373 struct ipath_user_sdma_pkt *pkt, *pkt_next;
374
375 list_for_each_entry_safe(pkt, pkt_next, list, list) {
376 int i;
377
378 for (i = 0; i < pkt->naddr; i++)
379 ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i);
380
381 kmem_cache_free(pq->pkt_slab, pkt);
382 }
383}
384
385/*
386 * copy headers, coalesce etc -- pq->lock must be held
387 *
 388 * we queue all the packets onto list, returning the number
 389 * of iovec entries consumed. list must be empty initially,
 390 * as we clean it if there is an error...
391 */
392static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
393 struct ipath_user_sdma_queue *pq,
394 struct list_head *list,
395 const struct iovec *iov,
396 unsigned long niov,
397 int maxpkts)
398{
399 unsigned long idx = 0;
400 int ret = 0;
401 int npkts = 0;
402 struct page *page = NULL;
403 __le32 *pbc;
404 dma_addr_t dma_addr;
405 struct ipath_user_sdma_pkt *pkt = NULL;
406 size_t len;
407 size_t nw;
408 u32 counter = pq->counter;
409 int dma_mapped = 0;
410
411 while (idx < niov && npkts < maxpkts) {
412 const unsigned long addr = (unsigned long) iov[idx].iov_base;
413 const unsigned long idx_save = idx;
414 unsigned pktnw;
415 unsigned pktnwc;
416 int nfrags = 0;
417 int npages = 0;
418 int cfur;
419
420 dma_mapped = 0;
421 len = iov[idx].iov_len;
422 nw = len >> 2;
423 page = NULL;
424
425 pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
426 if (!pkt) {
427 ret = -ENOMEM;
428 goto free_list;
429 }
430
431 if (len < IPATH_USER_SDMA_MIN_HEADER_LENGTH ||
432 len > PAGE_SIZE || len & 3 || addr & 3) {
433 ret = -EINVAL;
434 goto free_pkt;
435 }
436
437 if (len == IPATH_USER_SDMA_EXP_HEADER_LENGTH)
438 pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
439 &dma_addr);
440 else
441 pbc = NULL;
442
443 if (!pbc) {
444 page = alloc_page(GFP_KERNEL);
445 if (!page) {
446 ret = -ENOMEM;
447 goto free_pkt;
448 }
449 pbc = kmap(page);
450 }
451
452 cfur = copy_from_user(pbc, iov[idx].iov_base, len);
453 if (cfur) {
454 ret = -EFAULT;
455 goto free_pbc;
456 }
457
458 /*
 459 * this assignment is a bit strange. it's because the
 460 * pbc counts the number of 32 bit words in the full
461 * packet _except_ the first word of the pbc itself...
462 */
463 pktnwc = nw - 1;
464
465 /*
466 * pktnw computation yields the number of 32 bit words
467 * that the caller has indicated in the PBC. note that
468 * this is one less than the total number of words that
 469 * go to the send DMA engine, as the first 32 bit word
470 * of the PBC itself is not counted. Armed with this count,
471 * we can verify that the packet is consistent with the
472 * iovec lengths.
473 */
474 pktnw = le32_to_cpu(*pbc) & IPATH_PBC_LENGTH_MASK;
475 if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
476 ret = -EINVAL;
477 goto free_pbc;
478 }
479
480
481 idx++;
482 while (pktnwc < pktnw && idx < niov) {
483 const size_t slen = iov[idx].iov_len;
484 const unsigned long faddr =
485 (unsigned long) iov[idx].iov_base;
486
487 if (slen & 3 || faddr & 3 || !slen ||
488 slen > PAGE_SIZE) {
489 ret = -EINVAL;
490 goto free_pbc;
491 }
492
493 npages++;
494 if ((faddr & PAGE_MASK) !=
495 ((faddr + slen - 1) & PAGE_MASK))
496 npages++;
497
498 pktnwc += slen >> 2;
499 idx++;
500 nfrags++;
501 }
502
503 if (pktnwc != pktnw) {
504 ret = -EINVAL;
505 goto free_pbc;
506 }
507
508 if (page) {
509 dma_addr = dma_map_page(&dd->pcidev->dev,
510 page, 0, len, DMA_TO_DEVICE);
511 if (dma_mapping_error(dma_addr)) {
512 ret = -ENOMEM;
513 goto free_pbc;
514 }
515
516 dma_mapped = 1;
517 }
518
519 ipath_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
520 page, pbc, dma_addr);
521
522 if (nfrags) {
523 ret = ipath_user_sdma_init_payload(dd, pq, pkt,
524 iov + idx_save + 1,
525 nfrags, npages);
526 if (ret < 0)
527 goto free_pbc_dma;
528 }
529
530 counter++;
531 npkts++;
532
533 list_add_tail(&pkt->list, list);
534 }
535
536 ret = idx;
537 goto done;
538
539free_pbc_dma:
540 if (dma_mapped)
541 dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
542free_pbc:
543 if (page) {
544 kunmap(page);
545 __free_page(page);
546 } else
547 dma_pool_free(pq->header_cache, pbc, dma_addr);
548free_pkt:
549 kmem_cache_free(pq->pkt_slab, pkt);
550free_list:
551 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
552done:
553 return ret;
554}
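/*
 * Worked example of the PBC length check above: a 64-byte header iovec
 * gives nw = 16, so pktnwc starts at 15 (the first PBC word is not
 * counted). Two payload fragments of 1024 and 1012 bytes add 256 and
 * 253 words, so pktnwc ends at 15 + 256 + 253 = 524; the low 11 bits
 * of the first PBC word must then also read 524, or the packet is
 * rejected with -EINVAL.
 */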
555
556static void ipath_user_sdma_set_complete_counter(struct ipath_user_sdma_queue *pq,
557 u32 c)
558{
559 pq->sent_counter = c;
560}
561
562/* try to clean out queue -- needs pq->lock */
563static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd,
564 struct ipath_user_sdma_queue *pq)
565{
566 struct list_head free_list;
567 struct ipath_user_sdma_pkt *pkt;
568 struct ipath_user_sdma_pkt *pkt_prev;
569 int ret = 0;
570
571 INIT_LIST_HEAD(&free_list);
572
573 list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
574 s64 descd = dd->ipath_sdma_descq_removed - pkt->added;
575
576 if (descd < 0)
577 break;
578
579 list_move_tail(&pkt->list, &free_list);
580
581 /* one more packet cleaned */
582 ret++;
583 }
584
585 if (!list_empty(&free_list)) {
586 u32 counter;
587
588 pkt = list_entry(free_list.prev,
589 struct ipath_user_sdma_pkt, list);
590 counter = pkt->counter;
591
592 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
593 ipath_user_sdma_set_complete_counter(pq, counter);
594 }
595
596 return ret;
597}
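
The cleanup test above relies on free-running u64 descriptor counters: a packet is complete once the hardware's "removed" count has reached the count recorded when the packet was queued. The comparison in isolation, as a sketch:

#include <stdint.h>

/*
 * Both counters increase monotonically and may wrap; the signed
 * difference stays correct as long as fewer than 2^63 descriptors
 * are ever in flight at once.
 */
static int pkt_completed(uint64_t removed, uint64_t added_at_queue_time)
{
	return (int64_t)(removed - added_at_queue_time) >= 0;
}
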
598
599void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq)
600{
601 if (!pq)
602 return;
603
604 kmem_cache_destroy(pq->pkt_slab);
605 dma_pool_destroy(pq->header_cache);
606 kfree(pq);
607}
608
609/* clean descriptor queue, returns > 0 if some elements cleaned */
610static int ipath_user_sdma_hwqueue_clean(struct ipath_devdata *dd)
611{
612 int ret;
613 unsigned long flags;
614
615 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
616 ret = ipath_sdma_make_progress(dd);
617 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
618
619 return ret;
620}
621
622/* we're in close; drain packets so that we can clean up successfully... */
623void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
624 struct ipath_user_sdma_queue *pq)
625{
626 int i;
627
628 if (!pq)
629 return;
630
631 for (i = 0; i < 100; i++) {
632 mutex_lock(&pq->lock);
633 if (list_empty(&pq->sent)) {
634 mutex_unlock(&pq->lock);
635 break;
636 }
637 ipath_user_sdma_hwqueue_clean(dd);
638 ipath_user_sdma_queue_clean(dd, pq);
639 mutex_unlock(&pq->lock);
640 msleep(10);
641 }
642
643 if (!list_empty(&pq->sent)) {
644 struct list_head free_list;
645
646 printk(KERN_INFO "drain: lists not empty: forcing!\n");
647 INIT_LIST_HEAD(&free_list);
648 mutex_lock(&pq->lock);
649 list_splice_init(&pq->sent, &free_list);
650 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
651 mutex_unlock(&pq->lock);
652 }
653}
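
The drain above is a bounded poll: try for roughly a second (100 iterations of 10 ms), then force-free whatever is still queued. The same shape in a generic sketch, with the dependencies injected as function pointers (names hypothetical):

/* Poll work_pending()/reap() up to 'tries' times, then give up. */
static int drain_bounded(int (*work_pending)(void), void (*reap)(void),
			 int tries, void (*sleep_ms)(int))
{
	int i;

	for (i = 0; i < tries; i++) {
		if (!work_pending())
			return 0;	/* drained cleanly */
		reap();
		sleep_ms(10);
	}
	return -1;			/* caller must force cleanup */
}
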
654
655static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
656 u64 addr, u64 dwlen, u64 dwoffset)
657{
658 return cpu_to_le64(/* SDmaPhyAddr[31:0] */
659 ((addr & 0xfffffffcULL) << 32) |
660 /* SDmaGeneration[1:0] */
661 ((dd->ipath_sdma_generation & 3ULL) << 30) |
662 /* SDmaDwordCount[10:0] */
663 ((dwlen & 0x7ffULL) << 16) |
664 /* SDmaBufOffset[12:2] */
665 (dwoffset & 0x7ffULL));
666}
667
668static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
669{
670 return descq | __constant_cpu_to_le64(1ULL << 12);
671}
672
673static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
674{
675 /* last */ /* dma head */
676 return descq | __constant_cpu_to_le64(1ULL << 11 | 1ULL << 13);
677}
678
679static inline __le64 ipath_sdma_make_desc1(u64 addr)
680{
681 /* SDmaPhyAddr[47:32] */
682 return cpu_to_le64(addr >> 32);
683}
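
The helpers above pack one 64-bit SDMA descriptor qword out of several fields; a userspace sketch of the same layout, with the field positions taken from the comments above and the first/last/head flag bits at positions 12, 11 and 13:

#include <stdint.h>

static uint64_t make_desc0(uint64_t addr, uint64_t gen,
			   uint64_t dwlen, uint64_t dwoffset)
{
	return ((addr & 0xfffffffcULL) << 32) |	/* SDmaPhyAddr[31:0]    */
	       ((gen & 3ULL) << 30) |		/* SDmaGeneration[1:0]  */
	       ((dwlen & 0x7ffULL) << 16) |	/* SDmaDwordCount[10:0] */
	       (dwoffset & 0x7ffULL);		/* SDmaBufOffset[12:2]  */
}

static uint64_t mark_first(uint64_t d) { return d | (1ULL << 12); }
static uint64_t mark_last(uint64_t d)  { return d | (1ULL << 11) | (1ULL << 13); }
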
684
685static void ipath_user_sdma_send_frag(struct ipath_devdata *dd,
686 struct ipath_user_sdma_pkt *pkt, int idx,
687 unsigned ofs, u16 tail)
688{
689 const u64 addr = (u64) pkt->addr[idx].addr +
690 (u64) pkt->addr[idx].offset;
691 const u64 dwlen = (u64) pkt->addr[idx].length / 4;
692 __le64 *descqp;
693 __le64 descq0;
694
695 descqp = &dd->ipath_sdma_descq[tail].qw[0];
696
697 descq0 = ipath_sdma_make_desc0(dd, addr, dwlen, ofs);
698 if (idx == 0)
699 descq0 = ipath_sdma_make_first_desc0(descq0);
700 if (idx == pkt->naddr - 1)
701 descq0 = ipath_sdma_make_last_desc0(descq0);
702
703 descqp[0] = descq0;
704 descqp[1] = ipath_sdma_make_desc1(addr);
705}
706
707/* pq->lock must be held; get packets on the wire... */
708static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
709 struct ipath_user_sdma_queue *pq,
710 struct list_head *pktlist)
711{
712 int ret = 0;
713 unsigned long flags;
714 u16 tail;
715
716 if (list_empty(pktlist))
717 return 0;
718
719 if (unlikely(!(dd->ipath_flags & IPATH_LINKACTIVE)))
720 return -ECOMM;
721
722 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
723
724 if (unlikely(dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK)) {
725 ret = -ECOMM;
726 goto unlock;
727 }
728
729 tail = dd->ipath_sdma_descq_tail;
730 while (!list_empty(pktlist)) {
731 struct ipath_user_sdma_pkt *pkt =
732 list_entry(pktlist->next, struct ipath_user_sdma_pkt,
733 list);
734 int i;
735 unsigned ofs = 0;
736 u16 dtail = tail;
737
738 if (pkt->naddr > ipath_sdma_descq_freecnt(dd))
739 goto unlock_check_tail;
740
741 for (i = 0; i < pkt->naddr; i++) {
742 ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail);
743 ofs += pkt->addr[i].length >> 2;
744
745 if (++tail == dd->ipath_sdma_descq_cnt) {
746 tail = 0;
747 ++dd->ipath_sdma_generation;
748 }
749 }
750
751		if ((ofs << 2) > dd->ipath_ibmaxlen) {
752			ipath_dbg("packet size %X > ibmax %X, fail\n",
753				ofs << 2, dd->ipath_ibmaxlen);
754 ret = -EMSGSIZE;
755 goto unlock;
756 }
757
758 /*
759 * if the packet is >= 2KB mtu equivalent, we have to use
760 * the large buffers, and have to mark each descriptor as
761 * part of a large buffer packet.
762 */
763 if (ofs >= IPATH_SMALLBUF_DWORDS) {
764 for (i = 0; i < pkt->naddr; i++) {
765 dd->ipath_sdma_descq[dtail].qw[0] |=
766 __constant_cpu_to_le64(1ULL << 14);
767 if (++dtail == dd->ipath_sdma_descq_cnt)
768 dtail = 0;
769 }
770 }
771
772 dd->ipath_sdma_descq_added += pkt->naddr;
773 pkt->added = dd->ipath_sdma_descq_added;
774 list_move_tail(&pkt->list, &pq->sent);
775 ret++;
776 }
777
778unlock_check_tail:
779 /* advance the tail on the chip if necessary */
780 if (dd->ipath_sdma_descq_tail != tail) {
781 wmb();
782 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
783 dd->ipath_sdma_descq_tail = tail;
784 }
785
786unlock:
787 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
788
789 return ret;
790}
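
Descriptor tail handling above is a classic ring: advance the tail once per descriptor, wrap at the ring size, and bump a generation counter so the hardware can tell freshly written entries from stale ones. The wrap logic in isolation, as a sketch:

struct ring_pos {
	unsigned short tail;
	unsigned gen;
};

static void ring_advance(struct ring_pos *p, unsigned ring_size)
{
	if (++p->tail == ring_size) {
		p->tail = 0;
		p->gen++;	/* entries written after wrap carry the new generation */
	}
}
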
791
792int ipath_user_sdma_writev(struct ipath_devdata *dd,
793 struct ipath_user_sdma_queue *pq,
794 const struct iovec *iov,
795 unsigned long dim)
796{
797 int ret = 0;
798 struct list_head list;
799 int npkts = 0;
800
801 INIT_LIST_HEAD(&list);
802
803 mutex_lock(&pq->lock);
804
805 if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) {
806 ipath_user_sdma_hwqueue_clean(dd);
807 ipath_user_sdma_queue_clean(dd, pq);
808 }
809
810 while (dim) {
811 const int mxp = 8;
812
813 down_write(&current->mm->mmap_sem);
814 ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
815 up_write(&current->mm->mmap_sem);
816
817 if (ret <= 0)
818 goto done_unlock;
819 else {
820 dim -= ret;
821 iov += ret;
822 }
823
824 /* force packets onto the sdma hw queue... */
825 if (!list_empty(&list)) {
826 /*
827			 * lazily clean the hw queue. the 4 is a rough guess
828			 * of how many sdma descriptors a packet will take (it
829 * doesn't have to be perfect).
830 */
831 if (ipath_sdma_descq_freecnt(dd) < ret * 4) {
832 ipath_user_sdma_hwqueue_clean(dd);
833 ipath_user_sdma_queue_clean(dd, pq);
834 }
835
836 ret = ipath_user_sdma_push_pkts(dd, pq, &list);
837 if (ret < 0)
838 goto done_unlock;
839 else {
840 npkts += ret;
841 pq->counter += ret;
842
843 if (!list_empty(&list))
844 goto done_unlock;
845 }
846 }
847 }
848
849done_unlock:
850 if (!list_empty(&list))
851 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
852 mutex_unlock(&pq->lock);
853
854 return (ret < 0) ? ret : npkts;
855}
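
ipath_user_sdma_writev() consumes the caller's iovec array in batches: the packet builder returns how many iovec entries it used, and the loop advances by that amount until the array is exhausted or an error occurs. A minimal sketch of the consumption pattern, with the builder abstracted away:

#include <sys/uio.h>

/* process() returns entries consumed (>0), or <=0 on error/no progress */
static int consume_iovec(const struct iovec *iov, unsigned long dim,
			 int (*process)(const struct iovec *, unsigned long))
{
	while (dim) {
		int used = process(iov, dim);

		if (used <= 0)
			return used;
		dim -= used;
		iov += used;
	}
	return 0;
}
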
856
857int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
858 struct ipath_user_sdma_queue *pq)
859{
860 int ret = 0;
861
862 mutex_lock(&pq->lock);
863 ipath_user_sdma_hwqueue_clean(dd);
864 ret = ipath_user_sdma_queue_clean(dd, pq);
865 mutex_unlock(&pq->lock);
866
867 return ret;
868}
869
870u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq)
871{
872 return pq->sent_counter;
873}
874
875u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq)
876{
877 return pq->counter;
878}
879
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.h b/drivers/infiniband/hw/ipath/ipath_user_sdma.h
new file mode 100644
index 000000000000..e70946c1428c
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/device.h>
33
34struct ipath_user_sdma_queue;
35
36struct ipath_user_sdma_queue *
37ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
38void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq);
39
40int ipath_user_sdma_writev(struct ipath_devdata *dd,
41 struct ipath_user_sdma_queue *pq,
42 const struct iovec *iov,
43 unsigned long dim);
44
45int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
46 struct ipath_user_sdma_queue *pq);
47
48int ipath_user_sdma_pkt_sent(const struct ipath_user_sdma_queue *pq,
49 u32 counter);
50void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
51 struct ipath_user_sdma_queue *pq);
52
53u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq);
54u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 32d8f882e56c..320a6d018de7 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -242,6 +242,93 @@ static void ipath_flush_wqe(struct ipath_qp *qp, struct ib_send_wr *wr)
242 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1); 242 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
243} 243}
244 244
245/*
246 * Count the number of DMA descriptors needed to send length bytes of data.
247 * Don't modify the ipath_sge_state to get the count.
248 * Return zero if any of the segments is not aligned.
249 */
250static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length)
251{
252 struct ipath_sge *sg_list = ss->sg_list;
253 struct ipath_sge sge = ss->sge;
254 u8 num_sge = ss->num_sge;
255 u32 ndesc = 1; /* count the header */
256
257 while (length) {
258 u32 len = sge.length;
259
260 if (len > length)
261 len = length;
262 if (len > sge.sge_length)
263 len = sge.sge_length;
264 BUG_ON(len == 0);
265 if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
266 (len != length && (len & (sizeof(u32) - 1)))) {
267 ndesc = 0;
268 break;
269 }
270 ndesc++;
271 sge.vaddr += len;
272 sge.length -= len;
273 sge.sge_length -= len;
274 if (sge.sge_length == 0) {
275 if (--num_sge)
276 sge = *sg_list++;
277 } else if (sge.length == 0 && sge.mr != NULL) {
278 if (++sge.n >= IPATH_SEGSZ) {
279 if (++sge.m >= sge.mr->mapsz)
280 break;
281 sge.n = 0;
282 }
283 sge.vaddr =
284 sge.mr->map[sge.m]->segs[sge.n].vaddr;
285 sge.length =
286 sge.mr->map[sge.m]->segs[sge.n].length;
287 }
288 length -= len;
289 }
290 return ndesc;
291}
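
ipath_count_sge() walks the scatter list on a copy of the state (so nothing is consumed), charging one descriptor per segment plus one for the header, and returning zero if any segment breaks 32-bit alignment so the caller can fall back to a bounce-buffer copy. The counting rule in miniature, over a hypothetical flat segment array; note this simplifies the kernel's rule, which allows the final chunk of the transfer to have any length:

#include <stdint.h>
#include <stddef.h>

struct seg { uintptr_t vaddr; uint32_t len; };

/* Return descriptors needed, or 0 if any segment is misaligned. */
static uint32_t count_descs(const struct seg *s, size_t nseg)
{
	uint32_t ndesc = 1;	/* one for the header */
	size_t i;

	for (i = 0; i < nseg; i++) {
		if ((s[i].vaddr & 3) ||
		    (i != nseg - 1 && (s[i].len & 3)))
			return 0;	/* caller must copy instead */
		ndesc++;
	}
	return ndesc;
}
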
292
293/*
294 * Copy from the SGEs to the data buffer.
295 */
296static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss,
297 u32 length)
298{
299 struct ipath_sge *sge = &ss->sge;
300
301 while (length) {
302 u32 len = sge->length;
303
304 if (len > length)
305 len = length;
306 if (len > sge->sge_length)
307 len = sge->sge_length;
308 BUG_ON(len == 0);
309 memcpy(data, sge->vaddr, len);
310 sge->vaddr += len;
311 sge->length -= len;
312 sge->sge_length -= len;
313 if (sge->sge_length == 0) {
314 if (--ss->num_sge)
315 *sge = *ss->sg_list++;
316 } else if (sge->length == 0 && sge->mr != NULL) {
317 if (++sge->n >= IPATH_SEGSZ) {
318 if (++sge->m >= sge->mr->mapsz)
319 break;
320 sge->n = 0;
321 }
322 sge->vaddr =
323 sge->mr->map[sge->m]->segs[sge->n].vaddr;
324 sge->length =
325 sge->mr->map[sge->m]->segs[sge->n].length;
326 }
327 data += len;
328 length -= len;
329 }
330}
331
245/** 332/**
246 * ipath_post_one_send - post one RC, UC, or UD send work request 333 * ipath_post_one_send - post one RC, UC, or UD send work request
247 * @qp: the QP to post on 334 * @qp: the QP to post on
@@ -866,27 +953,257 @@ static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
866 __raw_writel(last, piobuf); 953 __raw_writel(last, piobuf);
867} 954}
868 955
869static int ipath_verbs_send_pio(struct ipath_qp *qp, u32 *hdr, u32 hdrwords, 956/*
957 * Convert IB rate to delay multiplier.
958 */
959unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
960{
961 switch (rate) {
962 case IB_RATE_2_5_GBPS: return 8;
963 case IB_RATE_5_GBPS: return 4;
964 case IB_RATE_10_GBPS: return 2;
965 case IB_RATE_20_GBPS: return 1;
966 default: return 0;
967 }
968}
969
970/*
 971 * Convert delay multiplier to IB rate.
972 */
973static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
974{
975 switch (mult) {
976 case 8: return IB_RATE_2_5_GBPS;
977 case 4: return IB_RATE_5_GBPS;
978 case 2: return IB_RATE_10_GBPS;
979 case 1: return IB_RATE_20_GBPS;
980 default: return IB_RATE_PORT_CURRENT;
981 }
982}
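
The two tables above are inverses over the supported rates: 2.5/5/10/20 Gb/s map to delay multipliers 8/4/2/1 relative to the fastest link, so each doubling of the rate halves the multiplier. A quick standalone check of that relationship:

#include <assert.h>

int main(void)
{
	/* rate index: 0=2.5G, 1=5G, 2=10G, 3=20G; mult = 8 >> index */
	static const unsigned mult[4] = { 8, 4, 2, 1 };
	unsigned i;

	for (i = 0; i < 4; i++)
		assert(mult[i] == 8u >> i);
	return 0;
}
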
983
984static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
985{
986 struct ipath_verbs_txreq *tx = NULL;
987 unsigned long flags;
988
989 spin_lock_irqsave(&dev->pending_lock, flags);
990 if (!list_empty(&dev->txreq_free)) {
991 struct list_head *l = dev->txreq_free.next;
992
993 list_del(l);
994 tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
995 }
996 spin_unlock_irqrestore(&dev->pending_lock, flags);
997 return tx;
998}
999
1000static inline void put_txreq(struct ipath_ibdev *dev,
1001 struct ipath_verbs_txreq *tx)
1002{
1003 unsigned long flags;
1004
1005 spin_lock_irqsave(&dev->pending_lock, flags);
1006 list_add(&tx->txreq.list, &dev->txreq_free);
1007 spin_unlock_irqrestore(&dev->pending_lock, flags);
1008}
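
get_txreq()/put_txreq() implement a lock-protected free list of preallocated request structs: allocation never sleeps and simply fails when the pool is empty, which suits the atomic send path. The same pattern in a generic userspace sketch, with a pthread mutex standing in for the spinlock and a singly linked list for list_head:

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; /* payload would follow */ };

struct pool {
	pthread_mutex_t lock;
	struct node *free;	/* head of the free list */
};

static struct node *pool_get(struct pool *p)
{
	struct node *n;

	pthread_mutex_lock(&p->lock);
	n = p->free;
	if (n)
		p->free = n->next;
	pthread_mutex_unlock(&p->lock);
	return n;		/* NULL means the pool is exhausted */
}

static void pool_put(struct pool *p, struct node *n)
{
	pthread_mutex_lock(&p->lock);
	n->next = p->free;
	p->free = n;
	pthread_mutex_unlock(&p->lock);
}
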
1009
1010static void sdma_complete(void *cookie, int status)
1011{
1012 struct ipath_verbs_txreq *tx = cookie;
1013 struct ipath_qp *qp = tx->qp;
1014 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
1015
1016 /* Generate a completion queue entry if needed */
1017 if (qp->ibqp.qp_type != IB_QPT_RC && tx->wqe) {
1018 enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
1019 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
1020
1021 ipath_send_complete(qp, tx->wqe, ibs);
1022 }
1023
1024 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
1025 kfree(tx->txreq.map_addr);
1026 put_txreq(dev, tx);
1027
1028 if (atomic_dec_and_test(&qp->refcount))
1029 wake_up(&qp->wait);
1030}
1031
1032/*
1033 * Compute the number of clock cycles of delay before sending the next packet.
1034 * The multipliers reflect the number of clocks for the fastest rate so
1035 * one tick at 4xDDR is 8 ticks at 1xSDR.
1036 * If the destination port will take longer to receive a packet than
1037 * the outgoing link can send it, we need to delay sending the next packet
1038 * by the difference in time it takes the receiver to receive and the sender
1039 * to send this packet.
1040 * Note that this delay is always correct for UC and RC but not always
1041 * optimal for UD. For UD, the destination HCA can be different for each
1042 * packet, in which case, we could send packets to a different destination
1043 * while "waiting" for the delay. The overhead for doing this without
1044 * HW support is more than just paying the cost of delaying some packets
1045 * unnecessarily.
1046 */
1047static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
1048{
1049 return (rcv_mult > snd_mult) ?
1050 (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
1051}
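
Plugging numbers into ipath_pkt_delay(): a 1000-dword packet sent from a 20 Gb/s port (snd_mult 1) to a 2.5 Gb/s destination (rcv_mult 8) needs (1000 * (8 - 1) + 1) >> 1 = 3500 delay ticks; an equal or faster receiver needs none. A tiny self-check of the formula:

#include <stdint.h>
#include <assert.h>

static unsigned pkt_delay(uint32_t plen, uint8_t snd, uint8_t rcv)
{
	return rcv > snd ? (plen * (rcv - snd) + 1) >> 1 : 0;
}

int main(void)
{
	assert(pkt_delay(1000, 1, 8) == 3500);	/* slower receiver: delay */
	assert(pkt_delay(1000, 8, 1) == 0);	/* faster receiver: none  */
	return 0;
}
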
1052
1053static int ipath_verbs_send_dma(struct ipath_qp *qp,
1054 struct ipath_ib_header *hdr, u32 hdrwords,
1055 struct ipath_sge_state *ss, u32 len,
1056 u32 plen, u32 dwords)
1057{
1058 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
1059 struct ipath_devdata *dd = dev->dd;
1060 struct ipath_verbs_txreq *tx;
1061 u32 *piobuf;
1062 u32 control;
1063 u32 ndesc;
1064 int ret;
1065
1066 tx = qp->s_tx;
1067 if (tx) {
1068 qp->s_tx = NULL;
1069 /* resend previously constructed packet */
1070 ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
1071 if (ret)
1072 qp->s_tx = tx;
1073 goto bail;
1074 }
1075
1076 tx = get_txreq(dev);
1077 if (!tx) {
1078 ret = -EBUSY;
1079 goto bail;
1080 }
1081
1082 /*
1083 * Get the saved delay count we computed for the previous packet
1084 * and save the delay count for this packet to be used next time
1085 * we get here.
1086 */
1087 control = qp->s_pkt_delay;
1088 qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
1089
1090 tx->qp = qp;
1091 atomic_inc(&qp->refcount);
1092 tx->wqe = qp->s_wqe;
1093 tx->txreq.callback = sdma_complete;
1094 tx->txreq.callback_cookie = tx;
1095 tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
1096 IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC;
1097 if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
1098 tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;
1099
1100 /* VL15 packets bypass credit check */
1101 if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) {
1102 control |= 1ULL << 31;
1103 tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;
1104 }
1105
1106 if (len) {
1107 /*
1108 * Don't try to DMA if it takes more descriptors than
1109 * the queue holds.
1110 */
1111 ndesc = ipath_count_sge(ss, len);
1112 if (ndesc >= dd->ipath_sdma_descq_cnt)
1113 ndesc = 0;
1114 } else
1115 ndesc = 1;
1116 if (ndesc) {
1117 tx->hdr.pbc[0] = cpu_to_le32(plen);
1118 tx->hdr.pbc[1] = cpu_to_le32(control);
1119 memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
1120 tx->txreq.sg_count = ndesc;
1121 tx->map_len = (hdrwords + 2) << 2;
1122 tx->txreq.map_addr = &tx->hdr;
1123 ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
1124 if (ret) {
1125 /* save ss and length in dwords */
1126 tx->ss = ss;
1127 tx->len = dwords;
1128 qp->s_tx = tx;
1129 }
1130 goto bail;
1131 }
1132
1133 /* Allocate a buffer and copy the header and payload to it. */
1134 tx->map_len = (plen + 1) << 2;
1135 piobuf = kmalloc(tx->map_len, GFP_ATOMIC);
1136 if (unlikely(piobuf == NULL)) {
1137 ret = -EBUSY;
1138 goto err_tx;
1139 }
1140 tx->txreq.map_addr = piobuf;
1141 tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
1142 tx->txreq.sg_count = 1;
1143
1144 *piobuf++ = (__force u32) cpu_to_le32(plen);
1145 *piobuf++ = (__force u32) cpu_to_le32(control);
1146 memcpy(piobuf, hdr, hdrwords << 2);
1147 ipath_copy_from_sge(piobuf + hdrwords, ss, len);
1148
1149 ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
1150 /*
1151 * If we couldn't queue the DMA request, save the info
1152 * and try again later rather than destroying the
1153 * buffer and undoing the side effects of the copy.
1154 */
1155 if (ret) {
1156 tx->ss = NULL;
1157 tx->len = 0;
1158 qp->s_tx = tx;
1159 }
1160 dev->n_unaligned++;
1161 goto bail;
1162
1163err_tx:
1164 if (atomic_dec_and_test(&qp->refcount))
1165 wake_up(&qp->wait);
1166 put_txreq(dev, tx);
1167bail:
1168 return ret;
1169}
1170
1171static int ipath_verbs_send_pio(struct ipath_qp *qp,
1172 struct ipath_ib_header *ibhdr, u32 hdrwords,
870 struct ipath_sge_state *ss, u32 len, 1173 struct ipath_sge_state *ss, u32 len,
871 u32 plen, u32 dwords) 1174 u32 plen, u32 dwords)
872{ 1175{
873 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd; 1176 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
1177 u32 *hdr = (u32 *) ibhdr;
874 u32 __iomem *piobuf; 1178 u32 __iomem *piobuf;
875 unsigned flush_wc; 1179 unsigned flush_wc;
1180 u32 control;
876 int ret; 1181 int ret;
877 1182
878 piobuf = ipath_getpiobuf(dd, NULL); 1183 piobuf = ipath_getpiobuf(dd, plen, NULL);
879 if (unlikely(piobuf == NULL)) { 1184 if (unlikely(piobuf == NULL)) {
880 ret = -EBUSY; 1185 ret = -EBUSY;
881 goto bail; 1186 goto bail;
882 } 1187 }
883 1188
884 /* 1189 /*
885 * Write len to control qword, no flags. 1190 * Get the saved delay count we computed for the previous packet
1191 * and save the delay count for this packet to be used next time
1192 * we get here.
1193 */
1194 control = qp->s_pkt_delay;
1195 qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
1196
1197 /* VL15 packets bypass credit check */
1198 if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15)
1199 control |= 1ULL << 31;
1200
1201 /*
1202 * Write the length to the control qword plus any needed flags.
886 * We have to flush after the PBC for correctness on some cpus 1203 * We have to flush after the PBC for correctness on some cpus
887 * or WC buffer can be written out of order. 1204 * or WC buffer can be written out of order.
888 */ 1205 */
889 writeq(plen, piobuf); 1206 writeq(((u64) control << 32) | plen, piobuf);
890 piobuf += 2; 1207 piobuf += 2;
891 1208
892 flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC; 1209 flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
@@ -961,15 +1278,25 @@ int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
961 */ 1278 */
962 plen = hdrwords + dwords + 1; 1279 plen = hdrwords + dwords + 1;
963 1280
964 /* Drop non-VL15 packets if we are not in the active state */ 1281 /*
965 if (!(dd->ipath_flags & IPATH_LINKACTIVE) && 1282 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
966 qp->ibqp.qp_type != IB_QPT_SMI) { 1283 * can defer SDMA restart until link goes ACTIVE without
1284 * worrying about just how we got there.
1285 */
1286 if (qp->ibqp.qp_type == IB_QPT_SMI)
1287 ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1288 plen, dwords);
1289 /* All non-VL15 packets are dropped if link is not ACTIVE */
1290 else if (!(dd->ipath_flags & IPATH_LINKACTIVE)) {
967 if (qp->s_wqe) 1291 if (qp->s_wqe)
968 ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); 1292 ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
969 ret = 0; 1293 ret = 0;
970 } else 1294 } else if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
971 ret = ipath_verbs_send_pio(qp, (u32 *) hdr, hdrwords, 1295 ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
972 ss, len, plen, dwords); 1296 plen, dwords);
1297 else
1298 ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1299 plen, dwords);
973 1300
974 return ret; 1301 return ret;
975} 1302}
@@ -1038,6 +1365,12 @@ int ipath_get_counters(struct ipath_devdata *dd,
1038 ipath_snap_cntr(dd, crp->cr_errlpcrccnt) + 1365 ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
1039 ipath_snap_cntr(dd, crp->cr_badformatcnt) + 1366 ipath_snap_cntr(dd, crp->cr_badformatcnt) +
1040 dd->ipath_rxfc_unsupvl_errs; 1367 dd->ipath_rxfc_unsupvl_errs;
1368 if (crp->cr_rxotherlocalphyerrcnt)
1369 cntrs->port_rcv_errors +=
1370 ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt);
1371 if (crp->cr_rxvlerrcnt)
1372 cntrs->port_rcv_errors +=
1373 ipath_snap_cntr(dd, crp->cr_rxvlerrcnt);
1041 cntrs->port_rcv_remphys_errors = 1374 cntrs->port_rcv_remphys_errors =
1042 ipath_snap_cntr(dd, crp->cr_rcvebpcnt); 1375 ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
1043 cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt); 1376 cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
@@ -1046,9 +1379,16 @@ int ipath_get_counters(struct ipath_devdata *dd,
1046 cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt); 1379 cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
1047 cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt); 1380 cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
1048 cntrs->local_link_integrity_errors = 1381 cntrs->local_link_integrity_errors =
1049 (dd->ipath_flags & IPATH_GPIO_ERRINTRS) ? 1382 crp->cr_locallinkintegrityerrcnt ?
1050 dd->ipath_lli_errs : dd->ipath_lli_errors; 1383 ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) :
1051 cntrs->excessive_buffer_overrun_errors = dd->ipath_overrun_thresh_errs; 1384 ((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
1385 dd->ipath_lli_errs : dd->ipath_lli_errors);
1386 cntrs->excessive_buffer_overrun_errors =
1387 crp->cr_excessbufferovflcnt ?
1388 ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) :
1389 dd->ipath_overrun_thresh_errs;
1390 cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ?
1391 ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0;
1052 1392
1053 ret = 0; 1393 ret = 0;
1054 1394
@@ -1183,7 +1523,9 @@ static int ipath_query_port(struct ib_device *ibdev,
1183 props->sm_lid = dev->sm_lid; 1523 props->sm_lid = dev->sm_lid;
1184 props->sm_sl = dev->sm_sl; 1524 props->sm_sl = dev->sm_sl;
1185 ibcstat = dd->ipath_lastibcstat; 1525 ibcstat = dd->ipath_lastibcstat;
1186 props->state = ((ibcstat >> 4) & 0x3) + 1; 1526 /* map LinkState to IB portinfo values. */
1527 props->state = ipath_ib_linkstate(dd, ibcstat) + 1;
1528
1187 /* See phys_state_show() */ 1529 /* See phys_state_show() */
1188 props->phys_state = /* MEA: assumes shift == 0 */ 1530 props->phys_state = /* MEA: assumes shift == 0 */
1189 ipath_cvt_physportstate[dd->ipath_lastibcstat & 1531 ipath_cvt_physportstate[dd->ipath_lastibcstat &
@@ -1195,18 +1537,13 @@ static int ipath_query_port(struct ib_device *ibdev,
1195 props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) - 1537 props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
1196 dev->z_pkey_violations; 1538 dev->z_pkey_violations;
1197 props->qkey_viol_cntr = dev->qkey_violations; 1539 props->qkey_viol_cntr = dev->qkey_violations;
1198 props->active_width = IB_WIDTH_4X; 1540 props->active_width = dd->ipath_link_width_active;
1199 /* See rate_show() */ 1541 /* See rate_show() */
1200 props->active_speed = 1; /* Regular 10Mbs speed. */ 1542 props->active_speed = dd->ipath_link_speed_active;
1201 props->max_vl_num = 1; /* VLCap = VL0 */ 1543 props->max_vl_num = 1; /* VLCap = VL0 */
1202 props->init_type_reply = 0; 1544 props->init_type_reply = 0;
1203 1545
1204 /* 1546 props->max_mtu = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
1205 * Note: the chip supports a maximum MTU of 4096, but the driver
1206 * hasn't implemented this feature yet, so set the maximum value
1207 * to 2048.
1208 */
1209 props->max_mtu = IB_MTU_2048;
1210 switch (dd->ipath_ibmtu) { 1547 switch (dd->ipath_ibmtu) {
1211 case 4096: 1548 case 4096:
1212 mtu = IB_MTU_4096; 1549 mtu = IB_MTU_4096;
@@ -1399,6 +1736,7 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
1399 1736
1400 /* ib_create_ah() will initialize ah->ibah. */ 1737 /* ib_create_ah() will initialize ah->ibah. */
1401 ah->attr = *ah_attr; 1738 ah->attr = *ah_attr;
1739 ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate);
1402 1740
1403 ret = &ah->ibah; 1741 ret = &ah->ibah;
1404 1742
@@ -1432,6 +1770,7 @@ static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1432 struct ipath_ah *ah = to_iah(ibah); 1770 struct ipath_ah *ah = to_iah(ibah);
1433 1771
1434 *ah_attr = ah->attr; 1772 *ah_attr = ah->attr;
1773 ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate);
1435 1774
1436 return 0; 1775 return 0;
1437} 1776}
@@ -1581,6 +1920,8 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
1581 struct ipath_verbs_counters cntrs; 1920 struct ipath_verbs_counters cntrs;
1582 struct ipath_ibdev *idev; 1921 struct ipath_ibdev *idev;
1583 struct ib_device *dev; 1922 struct ib_device *dev;
1923 struct ipath_verbs_txreq *tx;
1924 unsigned i;
1584 int ret; 1925 int ret;
1585 1926
1586 idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev); 1927 idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
@@ -1591,6 +1932,17 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
1591 1932
1592 dev = &idev->ibdev; 1933 dev = &idev->ibdev;
1593 1934
1935 if (dd->ipath_sdma_descq_cnt) {
1936 tx = kmalloc(dd->ipath_sdma_descq_cnt * sizeof *tx,
1937 GFP_KERNEL);
1938 if (tx == NULL) {
1939 ret = -ENOMEM;
1940 goto err_tx;
1941 }
1942 } else
1943 tx = NULL;
1944 idev->txreq_bufs = tx;
1945
1594 /* Only need to initialize non-zero fields. */ 1946 /* Only need to initialize non-zero fields. */
1595 spin_lock_init(&idev->n_pds_lock); 1947 spin_lock_init(&idev->n_pds_lock);
1596 spin_lock_init(&idev->n_ahs_lock); 1948 spin_lock_init(&idev->n_ahs_lock);
@@ -1631,15 +1983,17 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
1631 INIT_LIST_HEAD(&idev->pending[2]); 1983 INIT_LIST_HEAD(&idev->pending[2]);
1632 INIT_LIST_HEAD(&idev->piowait); 1984 INIT_LIST_HEAD(&idev->piowait);
1633 INIT_LIST_HEAD(&idev->rnrwait); 1985 INIT_LIST_HEAD(&idev->rnrwait);
1986 INIT_LIST_HEAD(&idev->txreq_free);
1634 idev->pending_index = 0; 1987 idev->pending_index = 0;
1635 idev->port_cap_flags = 1988 idev->port_cap_flags =
1636 IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP; 1989 IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
1990 if (dd->ipath_flags & IPATH_HAS_LINK_LATENCY)
1991 idev->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
1637 idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; 1992 idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1638 idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; 1993 idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1639 idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; 1994 idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1640 idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; 1995 idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1641 idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; 1996 idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1642 idev->link_width_enabled = 3; /* 1x or 4x */
1643 1997
1644 /* Snapshot current HW counters to "clear" them. */ 1998 /* Snapshot current HW counters to "clear" them. */
1645 ipath_get_counters(dd, &cntrs); 1999 ipath_get_counters(dd, &cntrs);
@@ -1661,6 +2015,9 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
1661 cntrs.excessive_buffer_overrun_errors; 2015 cntrs.excessive_buffer_overrun_errors;
1662 idev->z_vl15_dropped = cntrs.vl15_dropped; 2016 idev->z_vl15_dropped = cntrs.vl15_dropped;
1663 2017
2018 for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++)
2019 list_add(&tx->txreq.list, &idev->txreq_free);
2020
1664 /* 2021 /*
1665 * The system image GUID is supposed to be the same for all 2022 * The system image GUID is supposed to be the same for all
1666 * IB HCAs in a single system but since there can be other 2023 * IB HCAs in a single system but since there can be other
@@ -1710,6 +2067,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
1710 dev->phys_port_cnt = 1; 2067 dev->phys_port_cnt = 1;
1711 dev->num_comp_vectors = 1; 2068 dev->num_comp_vectors = 1;
1712 dev->dma_device = &dd->pcidev->dev; 2069 dev->dma_device = &dd->pcidev->dev;
2070 dev->class_dev.dev = dev->dma_device;
1713 dev->query_device = ipath_query_device; 2071 dev->query_device = ipath_query_device;
1714 dev->modify_device = ipath_modify_device; 2072 dev->modify_device = ipath_modify_device;
1715 dev->query_port = ipath_query_port; 2073 dev->query_port = ipath_query_port;
@@ -1774,6 +2132,8 @@ err_reg:
1774err_lk: 2132err_lk:
1775 kfree(idev->qp_table.table); 2133 kfree(idev->qp_table.table);
1776err_qp: 2134err_qp:
2135 kfree(idev->txreq_bufs);
2136err_tx:
1777 ib_dealloc_device(dev); 2137 ib_dealloc_device(dev);
1778 ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret); 2138 ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
1779 idev = NULL; 2139 idev = NULL;
@@ -1808,6 +2168,7 @@ void ipath_unregister_ib_device(struct ipath_ibdev *dev)
1808 ipath_free_all_qps(&dev->qp_table); 2168 ipath_free_all_qps(&dev->qp_table);
1809 kfree(dev->qp_table.table); 2169 kfree(dev->qp_table.table);
1810 kfree(dev->lk_table.table); 2170 kfree(dev->lk_table.table);
2171 kfree(dev->txreq_bufs);
1811 ib_dealloc_device(ibdev); 2172 ib_dealloc_device(ibdev);
1812} 2173}
1813 2174
@@ -1855,13 +2216,15 @@ static ssize_t show_stats(struct class_device *cdev, char *buf)
1855 "RC stalls %d\n" 2216 "RC stalls %d\n"
1856 "piobuf wait %d\n" 2217 "piobuf wait %d\n"
1857 "no piobuf %d\n" 2218 "no piobuf %d\n"
2219 "unaligned %d\n"
1858 "PKT drops %d\n" 2220 "PKT drops %d\n"
1859 "WQE errs %d\n", 2221 "WQE errs %d\n",
1860 dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks, 2222 dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
1861 dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks, 2223 dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
1862 dev->n_other_naks, dev->n_timeouts, 2224 dev->n_other_naks, dev->n_timeouts,
1863 dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait, 2225 dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait,
1864 dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs); 2226 dev->n_no_piobuf, dev->n_unaligned,
2227 dev->n_pkt_drops, dev->n_wqe_errs);
1865 for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) { 2228 for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
1866 const struct ipath_opcode_stats *si = &dev->opstats[i]; 2229 const struct ipath_opcode_stats *si = &dev->opstats[i];
1867 2230
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 3d59736b49b2..6514aa8306cd 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -138,6 +138,11 @@ struct ipath_ib_header {
138 } u; 138 } u;
139} __attribute__ ((packed)); 139} __attribute__ ((packed));
140 140
141struct ipath_pio_header {
142 __le32 pbc[2];
143 struct ipath_ib_header hdr;
144} __attribute__ ((packed));
145
141/* 146/*
142 * There is one struct ipath_mcast for each multicast GID. 147 * There is one struct ipath_mcast for each multicast GID.
143 * All attached QPs are then stored as a list of 148 * All attached QPs are then stored as a list of
@@ -319,6 +324,7 @@ struct ipath_sge_state {
319 struct ipath_sge *sg_list; /* next SGE to be used if any */ 324 struct ipath_sge *sg_list; /* next SGE to be used if any */
320 struct ipath_sge sge; /* progress state for the current SGE */ 325 struct ipath_sge sge; /* progress state for the current SGE */
321 u8 num_sge; 326 u8 num_sge;
327 u8 static_rate;
322}; 328};
323 329
324/* 330/*
@@ -356,6 +362,7 @@ struct ipath_qp {
356 struct tasklet_struct s_task; 362 struct tasklet_struct s_task;
357 struct ipath_mmap_info *ip; 363 struct ipath_mmap_info *ip;
358 struct ipath_sge_state *s_cur_sge; 364 struct ipath_sge_state *s_cur_sge;
365 struct ipath_verbs_txreq *s_tx;
359 struct ipath_sge_state s_sge; /* current send request data */ 366 struct ipath_sge_state s_sge; /* current send request data */
360 struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1]; 367 struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
361 struct ipath_sge_state s_ack_rdma_sge; 368 struct ipath_sge_state s_ack_rdma_sge;
@@ -363,7 +370,8 @@ struct ipath_qp {
363 struct ipath_sge_state r_sge; /* current receive data */ 370 struct ipath_sge_state r_sge; /* current receive data */
364 spinlock_t s_lock; 371 spinlock_t s_lock;
365 unsigned long s_busy; 372 unsigned long s_busy;
366 u32 s_hdrwords; /* size of s_hdr in 32 bit words */ 373 u16 s_pkt_delay;
374 u16 s_hdrwords; /* size of s_hdr in 32 bit words */
367 u32 s_cur_size; /* size of send packet in bytes */ 375 u32 s_cur_size; /* size of send packet in bytes */
368 u32 s_len; /* total length of s_sge */ 376 u32 s_len; /* total length of s_sge */
369 u32 s_rdma_read_len; /* total length of s_rdma_read_sge */ 377 u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
@@ -387,7 +395,6 @@ struct ipath_qp {
387 u8 r_nak_state; /* non-zero if NAK is pending */ 395 u8 r_nak_state; /* non-zero if NAK is pending */
388 u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */ 396 u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
389 u8 r_reuse_sge; /* for UC receive errors */ 397 u8 r_reuse_sge; /* for UC receive errors */
390 u8 r_sge_inx; /* current index into sg_list */
391 u8 r_wrid_valid; /* r_wrid set but CQ entry not yet made */ 398 u8 r_wrid_valid; /* r_wrid set but CQ entry not yet made */
392 u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */ 399 u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
393 u8 r_head_ack_queue; /* index into s_ack_queue[] */ 400 u8 r_head_ack_queue; /* index into s_ack_queue[] */
@@ -403,6 +410,7 @@ struct ipath_qp {
403 u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */ 410 u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
404 u8 s_tail_ack_queue; /* index into s_ack_queue[] */ 411 u8 s_tail_ack_queue; /* index into s_ack_queue[] */
405 u8 s_flags; 412 u8 s_flags;
413 u8 s_dmult;
406 u8 timeout; /* Timeout for this QP */ 414 u8 timeout; /* Timeout for this QP */
407 enum ib_mtu path_mtu; 415 enum ib_mtu path_mtu;
408 u32 remote_qpn; 416 u32 remote_qpn;
@@ -510,6 +518,8 @@ struct ipath_ibdev {
510 struct ipath_lkey_table lk_table; 518 struct ipath_lkey_table lk_table;
511 struct list_head pending[3]; /* FIFO of QPs waiting for ACKs */ 519 struct list_head pending[3]; /* FIFO of QPs waiting for ACKs */
512 struct list_head piowait; /* list for wait PIO buf */ 520 struct list_head piowait; /* list for wait PIO buf */
521 struct list_head txreq_free;
522 void *txreq_bufs;
513 /* list of QPs waiting for RNR timer */ 523 /* list of QPs waiting for RNR timer */
514 struct list_head rnrwait; 524 struct list_head rnrwait;
515 spinlock_t pending_lock; 525 spinlock_t pending_lock;
@@ -570,6 +580,7 @@ struct ipath_ibdev {
570 u32 n_rdma_dup_busy; 580 u32 n_rdma_dup_busy;
571 u32 n_piowait; 581 u32 n_piowait;
572 u32 n_no_piobuf; 582 u32 n_no_piobuf;
583 u32 n_unaligned;
573 u32 port_cap_flags; 584 u32 port_cap_flags;
574 u32 pma_sample_start; 585 u32 pma_sample_start;
575 u32 pma_sample_interval; 586 u32 pma_sample_interval;
@@ -581,7 +592,6 @@ struct ipath_ibdev {
581 u16 pending_index; /* which pending queue is active */ 592 u16 pending_index; /* which pending queue is active */
582 u8 pma_sample_status; 593 u8 pma_sample_status;
583 u8 subnet_timeout; 594 u8 subnet_timeout;
584 u8 link_width_enabled;
585 u8 vl_high_limit; 595 u8 vl_high_limit;
586 struct ipath_opcode_stats opstats[128]; 596 struct ipath_opcode_stats opstats[128];
587}; 597};
@@ -602,6 +612,16 @@ struct ipath_verbs_counters {
602 u32 vl15_dropped; 612 u32 vl15_dropped;
603}; 613};
604 614
615struct ipath_verbs_txreq {
616 struct ipath_qp *qp;
617 struct ipath_swqe *wqe;
618 u32 map_len;
619 u32 len;
620 struct ipath_sge_state *ss;
621 struct ipath_pio_header hdr;
622 struct ipath_sdma_txreq txreq;
623};
624
605static inline struct ipath_mr *to_imr(struct ib_mr *ibmr) 625static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
606{ 626{
607 return container_of(ibmr, struct ipath_mr, ibmr); 627 return container_of(ibmr, struct ipath_mr, ibmr);
@@ -694,11 +714,11 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
694 714
695void ipath_get_credit(struct ipath_qp *qp, u32 aeth); 715void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
696 716
717unsigned ipath_ib_rate_to_mult(enum ib_rate rate);
718
697int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr, 719int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
698 u32 hdrwords, struct ipath_sge_state *ss, u32 len); 720 u32 hdrwords, struct ipath_sge_state *ss, u32 len);
699 721
700void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
701
702void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length); 722void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
703 723
704void ipath_skip_sge(struct ipath_sge_state *ss, u32 length); 724void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 7360bbafbe84..3557e7edc9b6 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -85,6 +85,82 @@ static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
85 return get_sw_cqe(cq, cq->mcq.cons_index); 85 return get_sw_cqe(cq, cq->mcq.cons_index);
86} 86}
87 87
88int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
89{
90 struct mlx4_ib_cq *mcq = to_mcq(cq);
91 struct mlx4_ib_dev *dev = to_mdev(cq->device);
92
93 return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
94}
95
96static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
97{
98 int err;
99
100 err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
101 PAGE_SIZE * 2, &buf->buf);
102
103 if (err)
104 goto out;
105
106 err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
107 &buf->mtt);
108 if (err)
109 goto err_buf;
110
111 err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
112 if (err)
113 goto err_mtt;
114
115 return 0;
116
117err_mtt:
118 mlx4_mtt_cleanup(dev->dev, &buf->mtt);
119
120err_buf:
121 mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
122 &buf->buf);
123
124out:
125 return err;
126}
127
128static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
129{
130 mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
131}
132
133static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
134 struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
135 u64 buf_addr, int cqe)
136{
137 int err;
138
139 *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
140 IB_ACCESS_LOCAL_WRITE);
141 if (IS_ERR(*umem))
142 return PTR_ERR(*umem);
143
144 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
145 ilog2((*umem)->page_size), &buf->mtt);
146 if (err)
147 goto err_buf;
148
149 err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
150 if (err)
151 goto err_mtt;
152
153 return 0;
154
155err_mtt:
156 mlx4_mtt_cleanup(dev->dev, &buf->mtt);
157
158err_buf:
159 ib_umem_release(*umem);
160
161 return err;
162}
163
88struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector, 164struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
89 struct ib_ucontext *context, 165 struct ib_ucontext *context,
90 struct ib_udata *udata) 166 struct ib_udata *udata)
@@ -92,7 +168,6 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
92 struct mlx4_ib_dev *dev = to_mdev(ibdev); 168 struct mlx4_ib_dev *dev = to_mdev(ibdev);
93 struct mlx4_ib_cq *cq; 169 struct mlx4_ib_cq *cq;
94 struct mlx4_uar *uar; 170 struct mlx4_uar *uar;
95 int buf_size;
96 int err; 171 int err;
97 172
98 if (entries < 1 || entries > dev->dev->caps.max_cqes) 173 if (entries < 1 || entries > dev->dev->caps.max_cqes)
@@ -104,8 +179,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
104 179
105 entries = roundup_pow_of_two(entries + 1); 180 entries = roundup_pow_of_two(entries + 1);
106 cq->ibcq.cqe = entries - 1; 181 cq->ibcq.cqe = entries - 1;
107 buf_size = entries * sizeof (struct mlx4_cqe); 182 mutex_init(&cq->resize_mutex);
108 spin_lock_init(&cq->lock); 183 spin_lock_init(&cq->lock);
184 cq->resize_buf = NULL;
185 cq->resize_umem = NULL;
109 186
110 if (context) { 187 if (context) {
111 struct mlx4_ib_create_cq ucmd; 188 struct mlx4_ib_create_cq ucmd;
@@ -115,21 +192,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
115 goto err_cq; 192 goto err_cq;
116 } 193 }
117 194
118 cq->umem = ib_umem_get(context, ucmd.buf_addr, buf_size, 195 err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
119 IB_ACCESS_LOCAL_WRITE); 196 ucmd.buf_addr, entries);
120 if (IS_ERR(cq->umem)) {
121 err = PTR_ERR(cq->umem);
122 goto err_cq;
123 }
124
125 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(cq->umem),
126 ilog2(cq->umem->page_size), &cq->buf.mtt);
127 if (err)
128 goto err_buf;
129
130 err = mlx4_ib_umem_write_mtt(dev, &cq->buf.mtt, cq->umem);
131 if (err) 197 if (err)
132 goto err_mtt; 198 goto err_cq;
133 199
134 err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr, 200 err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
135 &cq->db); 201 &cq->db);
@@ -147,19 +213,9 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
147 *cq->mcq.set_ci_db = 0; 213 *cq->mcq.set_ci_db = 0;
148 *cq->mcq.arm_db = 0; 214 *cq->mcq.arm_db = 0;
149 215
150 if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &cq->buf.buf)) { 216 err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
151 err = -ENOMEM;
152 goto err_db;
153 }
154
155 err = mlx4_mtt_init(dev->dev, cq->buf.buf.npages, cq->buf.buf.page_shift,
156 &cq->buf.mtt);
157 if (err) 217 if (err)
158 goto err_buf; 218 goto err_db;
159
160 err = mlx4_buf_write_mtt(dev->dev, &cq->buf.mtt, &cq->buf.buf);
161 if (err)
162 goto err_mtt;
163 219
164 uar = &dev->priv_uar; 220 uar = &dev->priv_uar;
165 } 221 }
@@ -187,12 +243,10 @@ err_dbmap:
187err_mtt: 243err_mtt:
188 mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt); 244 mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);
189 245
190err_buf:
191 if (context) 246 if (context)
192 ib_umem_release(cq->umem); 247 ib_umem_release(cq->umem);
193 else 248 else
194 mlx4_buf_free(dev->dev, entries * sizeof (struct mlx4_cqe), 249 mlx4_ib_free_cq_buf(dev, &cq->buf, entries);
195 &cq->buf.buf);
196 250
197err_db: 251err_db:
198 if (!context) 252 if (!context)
@@ -204,6 +258,170 @@ err_cq:
204 return ERR_PTR(err); 258 return ERR_PTR(err);
205} 259}
206 260
261static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
262 int entries)
263{
264 int err;
265
266 if (cq->resize_buf)
267 return -EBUSY;
268
269 cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
270 if (!cq->resize_buf)
271 return -ENOMEM;
272
273 err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
274 if (err) {
275 kfree(cq->resize_buf);
276 cq->resize_buf = NULL;
277 return err;
278 }
279
280 cq->resize_buf->cqe = entries - 1;
281
282 return 0;
283}
284
285static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
286 int entries, struct ib_udata *udata)
287{
288 struct mlx4_ib_resize_cq ucmd;
289 int err;
290
291 if (cq->resize_umem)
292 return -EBUSY;
293
294 if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
295 return -EFAULT;
296
297 cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
298 if (!cq->resize_buf)
299 return -ENOMEM;
300
301 err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
302 &cq->resize_umem, ucmd.buf_addr, entries);
303 if (err) {
304 kfree(cq->resize_buf);
305 cq->resize_buf = NULL;
306 return err;
307 }
308
309 cq->resize_buf->cqe = entries - 1;
310
311 return 0;
312}
313
314static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
315{
316 u32 i;
317
318 i = cq->mcq.cons_index;
319 while (get_sw_cqe(cq, i & cq->ibcq.cqe))
320 ++i;
321
322 return i - cq->mcq.cons_index;
323}
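
mlx4_ib_get_outstanding_cqes() measures how far software lags the hardware by walking forward from the consumer index until it finds a slot without a completed entry; the ring slot is the free-running counter masked by the power-of-two-minus-one cqe value. Schematically, with the ownership test abstracted:

#include <stdint.h>

/* owned() reports whether slot (i & mask) holds a completed, unpolled CQE */
static uint32_t outstanding(uint32_t cons_index, uint32_t mask,
			    int (*owned)(uint32_t slot))
{
	uint32_t i = cons_index;

	while (owned(i & mask))
		++i;
	return i - cons_index;	/* wrap-safe for free-running u32 counters */
}
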
324
325static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
326{
327 struct mlx4_cqe *cqe;
328 int i;
329
330 i = cq->mcq.cons_index;
331 cqe = get_cqe(cq, i & cq->ibcq.cqe);
332 while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
333 memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
334 (i + 1) & cq->resize_buf->cqe),
335 get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
336 cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
337 }
338 ++cq->mcq.cons_index;
339}
340
341int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
342{
343 struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
344 struct mlx4_ib_cq *cq = to_mcq(ibcq);
345 int outst_cqe;
346 int err;
347
348 mutex_lock(&cq->resize_mutex);
349
350 if (entries < 1 || entries > dev->dev->caps.max_cqes) {
351 err = -EINVAL;
352 goto out;
353 }
354
355 entries = roundup_pow_of_two(entries + 1);
356 if (entries == ibcq->cqe + 1) {
357 err = 0;
358 goto out;
359 }
360
361 if (ibcq->uobject) {
362 err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
363 if (err)
364 goto out;
365 } else {
 366		/* Can't be smaller than the number of outstanding CQEs */
367 outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
368 if (entries < outst_cqe + 1) {
369 err = 0;
370 goto out;
371 }
372
373 err = mlx4_alloc_resize_buf(dev, cq, entries);
374 if (err)
375 goto out;
376 }
377
378 err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
379 if (err)
380 goto err_buf;
381
382 if (ibcq->uobject) {
383 cq->buf = cq->resize_buf->buf;
384 cq->ibcq.cqe = cq->resize_buf->cqe;
385 ib_umem_release(cq->umem);
386 cq->umem = cq->resize_umem;
387
388 kfree(cq->resize_buf);
389 cq->resize_buf = NULL;
390 cq->resize_umem = NULL;
391 } else {
392 spin_lock_irq(&cq->lock);
393 if (cq->resize_buf) {
394 mlx4_ib_cq_resize_copy_cqes(cq);
395 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
396 cq->buf = cq->resize_buf->buf;
397 cq->ibcq.cqe = cq->resize_buf->cqe;
398
399 kfree(cq->resize_buf);
400 cq->resize_buf = NULL;
401 }
402 spin_unlock_irq(&cq->lock);
403 }
404
405 goto out;
406
407err_buf:
408 if (!ibcq->uobject)
409 mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
410 cq->resize_buf->cqe);
411
412 kfree(cq->resize_buf);
413 cq->resize_buf = NULL;
414
415 if (cq->resize_umem) {
416 ib_umem_release(cq->resize_umem);
417 cq->resize_umem = NULL;
418 }
419
420out:
421 mutex_unlock(&cq->resize_mutex);
422 return err;
423}
424
207int mlx4_ib_destroy_cq(struct ib_cq *cq) 425int mlx4_ib_destroy_cq(struct ib_cq *cq)
208{ 426{
209 struct mlx4_ib_dev *dev = to_mdev(cq->device); 427 struct mlx4_ib_dev *dev = to_mdev(cq->device);
@@ -216,8 +434,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
216 mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db); 434 mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
217 ib_umem_release(mcq->umem); 435 ib_umem_release(mcq->umem);
218 } else { 436 } else {
219 mlx4_buf_free(dev->dev, (cq->cqe + 1) * sizeof (struct mlx4_cqe), 437 mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
220 &mcq->buf.buf);
221 mlx4_ib_db_free(dev, &mcq->db); 438 mlx4_ib_db_free(dev, &mcq->db);
222 } 439 }
223 440
@@ -297,6 +514,20 @@ static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
297 wc->vendor_err = cqe->vendor_err_syndrome; 514 wc->vendor_err = cqe->vendor_err_syndrome;
298} 515}
299 516
517static int mlx4_ib_ipoib_csum_ok(__be32 status, __be16 checksum)
518{
519 return ((status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
520 MLX4_CQE_IPOIB_STATUS_IPV4F |
521 MLX4_CQE_IPOIB_STATUS_IPV4OPT |
522 MLX4_CQE_IPOIB_STATUS_IPV6 |
523 MLX4_CQE_IPOIB_STATUS_IPOK)) ==
524 cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
525 MLX4_CQE_IPOIB_STATUS_IPOK)) &&
526 (status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_UDP |
527 MLX4_CQE_IPOIB_STATUS_TCP)) &&
528 checksum == cpu_to_be16(0xffff);
529}
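
The predicate above accepts a packet only when the status word flags exactly "IPv4 with IP checksum OK" (no fragment, option or IPv6 bits), flags TCP or UDP, and the 16-bit checksum field is 0xffff. The bit logic reduced to its shape, with hypothetical flag values chosen purely for illustration:

#include <stdint.h>

#define ST_IPV4		(1u << 0)	/* flag values assumed for the sketch */
#define ST_IPV4F	(1u << 1)
#define ST_IPV4OPT	(1u << 2)
#define ST_IPV6		(1u << 3)
#define ST_IPOK		(1u << 4)
#define ST_UDP		(1u << 5)
#define ST_TCP		(1u << 6)

static int csum_ok(uint32_t status, uint16_t checksum)
{
	/* within the address-family group, exactly IPV4|IPOK must be set */
	return (status & (ST_IPV4 | ST_IPV4F | ST_IPV4OPT |
			  ST_IPV6 | ST_IPOK)) == (ST_IPV4 | ST_IPOK) &&
	       (status & (ST_UDP | ST_TCP)) &&	/* recognized L4 protocol  */
	       checksum == 0xffff;		/* ones-complement sum OK */
}
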
530
300static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, 531static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
301 struct mlx4_ib_qp **cur_qp, 532 struct mlx4_ib_qp **cur_qp,
302 struct ib_wc *wc) 533 struct ib_wc *wc)
@@ -310,6 +541,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
310 u32 g_mlpath_rqpn; 541 u32 g_mlpath_rqpn;
311 u16 wqe_ctr; 542 u16 wqe_ctr;
312 543
544repoll:
313 cqe = next_cqe_sw(cq); 545 cqe = next_cqe_sw(cq);
314 if (!cqe) 546 if (!cqe)
315 return -EAGAIN; 547 return -EAGAIN;
@@ -332,6 +564,22 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
332 return -EINVAL; 564 return -EINVAL;
333 } 565 }
334 566
567 /* Resize CQ in progress */
568 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
569 if (cq->resize_buf) {
570 struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);
571
572 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
573 cq->buf = cq->resize_buf->buf;
574 cq->ibcq.cqe = cq->resize_buf->cqe;
575
576 kfree(cq->resize_buf);
577 cq->resize_buf = NULL;
578 }
579
580 goto repoll;
581 }
582
335 if (!*cur_qp || 583 if (!*cur_qp ||
336 (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) { 584 (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
337 /* 585 /*
@@ -406,6 +654,9 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
406 case MLX4_OPCODE_BIND_MW: 654 case MLX4_OPCODE_BIND_MW:
407 wc->opcode = IB_WC_BIND_MW; 655 wc->opcode = IB_WC_BIND_MW;
408 break; 656 break;
657 case MLX4_OPCODE_LSO:
658 wc->opcode = IB_WC_LSO;
659 break;
409 } 660 }
410 } else { 661 } else {
411 wc->byte_len = be32_to_cpu(cqe->byte_cnt); 662 wc->byte_len = be32_to_cpu(cqe->byte_cnt);
@@ -434,6 +685,8 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
434 wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; 685 wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
435 wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0; 686 wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
436 wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f; 687 wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
688 wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->ipoib_status,
689 cqe->checksum);
437 } 690 }
438 691
439 return 0; 692 return 0;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 0ed02b7834da..4c1e72fc8f57 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -165,7 +165,7 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
165 event.device = ibdev; 165 event.device = ibdev;
166 event.element.port_num = port_num; 166 event.element.port_num = port_num;
167 167
168 if(pinfo->clientrereg_resv_subnetto & 0x80) 168 if (pinfo->clientrereg_resv_subnetto & 0x80)
169 event.event = IB_EVENT_CLIENT_REREGISTER; 169 event.event = IB_EVENT_CLIENT_REREGISTER;
170 else 170 else
171 event.event = IB_EVENT_LID_CHANGE; 171 event.event = IB_EVENT_LID_CHANGE;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 96a39b5c9254..136c76c7b4e7 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -44,8 +44,8 @@
44#include "user.h" 44#include "user.h"
45 45
46#define DRV_NAME "mlx4_ib" 46#define DRV_NAME "mlx4_ib"
47#define DRV_VERSION "0.01" 47#define DRV_VERSION "1.0"
48#define DRV_RELDATE "May 1, 2006" 48#define DRV_RELDATE "April 4, 2008"
49 49
50MODULE_AUTHOR("Roland Dreier"); 50MODULE_AUTHOR("Roland Dreier");
51MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver"); 51MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -99,6 +99,10 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
99 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; 99 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
100 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT) 100 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
101 props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; 101 props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
102 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
103 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
104 if (dev->dev->caps.max_gso_sz)
105 props->device_cap_flags |= IB_DEVICE_UD_TSO;
102 106
103 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 107 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
104 0xffffff; 108 0xffffff;
@@ -567,6 +571,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
567 (1ull << IB_USER_VERBS_CMD_DEREG_MR) | 571 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
568 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | 572 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
569 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | 573 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
574 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
570 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | 575 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
571 (1ull << IB_USER_VERBS_CMD_CREATE_QP) | 576 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
572 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | 577 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
@@ -605,6 +610,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
605 ibdev->ib_dev.post_send = mlx4_ib_post_send; 610 ibdev->ib_dev.post_send = mlx4_ib_post_send;
606 ibdev->ib_dev.post_recv = mlx4_ib_post_recv; 611 ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
607 ibdev->ib_dev.create_cq = mlx4_ib_create_cq; 612 ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
613 ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
614 ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
608 ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq; 615 ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
609 ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq; 616 ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
610 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq; 617 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
@@ -675,18 +682,20 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
675} 682}
676 683
677static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, 684static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
678 enum mlx4_dev_event event, int subtype, 685 enum mlx4_dev_event event, int port)
679 int port)
680{ 686{
681 struct ib_event ibev; 687 struct ib_event ibev;
682 688
683 switch (event) { 689 switch (event) {
684 case MLX4_EVENT_TYPE_PORT_CHANGE: 690 case MLX4_DEV_EVENT_PORT_UP:
685 ibev.event = subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ? 691 ibev.event = IB_EVENT_PORT_ACTIVE;
686 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
687 break; 692 break;
688 693
689 case MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR: 694 case MLX4_DEV_EVENT_PORT_DOWN:
695 ibev.event = IB_EVENT_PORT_ERR;
696 break;
697
698 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
690 ibev.event = IB_EVENT_DEVICE_FATAL; 699 ibev.event = IB_EVENT_DEVICE_FATAL;
691 break; 700 break;
692 701
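With IB_DEVICE_UD_IP_CSUM and IB_DEVICE_UD_TSO advertised in device_cap_flags, a consumer can gate offload once at init time instead of per packet. A minimal sketch of the checksum-offload check (the helper name is ours; ib_query_device() and the capability flag come from the verbs API):

	static int ud_csum_usable(struct ib_device *ibdev)
	{
		struct ib_device_attr attr;

		if (ib_query_device(ibdev, &attr))
			return 0;
		return !!(attr.device_cap_flags & IB_DEVICE_UD_IP_CSUM);
	}

A sender that sees the bit set may then OR IB_SEND_IP_CSUM into wr.send_flags; the post_send hunks further down translate that into the IP and TCP/UDP checksum bits of the WQE control segment.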
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 3726e451a327..9e637323c155 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -78,13 +78,21 @@ struct mlx4_ib_cq_buf {
78 struct mlx4_mtt mtt; 78 struct mlx4_mtt mtt;
79}; 79};
80 80
81struct mlx4_ib_cq_resize {
82 struct mlx4_ib_cq_buf buf;
83 int cqe;
84};
85
81struct mlx4_ib_cq { 86struct mlx4_ib_cq {
82 struct ib_cq ibcq; 87 struct ib_cq ibcq;
83 struct mlx4_cq mcq; 88 struct mlx4_cq mcq;
84 struct mlx4_ib_cq_buf buf; 89 struct mlx4_ib_cq_buf buf;
90 struct mlx4_ib_cq_resize *resize_buf;
85 struct mlx4_ib_db db; 91 struct mlx4_ib_db db;
86 spinlock_t lock; 92 spinlock_t lock;
93 struct mutex resize_mutex;
87 struct ib_umem *umem; 94 struct ib_umem *umem;
95 struct ib_umem *resize_umem;
88}; 96};
89 97
90struct mlx4_ib_mr { 98struct mlx4_ib_mr {
@@ -110,6 +118,10 @@ struct mlx4_ib_wq {
110 unsigned tail; 118 unsigned tail;
111}; 119};
112 120
121enum mlx4_ib_qp_flags {
122 MLX4_IB_QP_LSO = 1 << 0
123};
124
113struct mlx4_ib_qp { 125struct mlx4_ib_qp {
114 struct ib_qp ibqp; 126 struct ib_qp ibqp;
115 struct mlx4_qp mqp; 127 struct mlx4_qp mqp;
@@ -129,6 +141,7 @@ struct mlx4_ib_qp {
129 struct mlx4_mtt mtt; 141 struct mlx4_mtt mtt;
130 int buf_size; 142 int buf_size;
131 struct mutex mutex; 143 struct mutex mutex;
144 u32 flags;
132 u8 port; 145 u8 port;
133 u8 alt_port; 146 u8 alt_port;
134 u8 atomic_rd_en; 147 u8 atomic_rd_en;
@@ -249,6 +262,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
249 struct ib_udata *udata); 262 struct ib_udata *udata);
250int mlx4_ib_dereg_mr(struct ib_mr *mr); 263int mlx4_ib_dereg_mr(struct ib_mr *mr);
251 264
265int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
266int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
252struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector, 267struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
253 struct ib_ucontext *context, 268 struct ib_ucontext *context,
254 struct ib_udata *udata); 269 struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 958e205b6d7c..b75efae7e449 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -71,6 +71,7 @@ enum {
71 71
72static const __be32 mlx4_ib_opcode[] = { 72static const __be32 mlx4_ib_opcode[] = {
73 [IB_WR_SEND] = __constant_cpu_to_be32(MLX4_OPCODE_SEND), 73 [IB_WR_SEND] = __constant_cpu_to_be32(MLX4_OPCODE_SEND),
74 [IB_WR_LSO] = __constant_cpu_to_be32(MLX4_OPCODE_LSO),
74 [IB_WR_SEND_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM), 75 [IB_WR_SEND_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
75 [IB_WR_RDMA_WRITE] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), 76 [IB_WR_RDMA_WRITE] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
76 [IB_WR_RDMA_WRITE_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), 77 [IB_WR_RDMA_WRITE_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
@@ -122,7 +123,7 @@ static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
122 */ 123 */
123static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size) 124static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
124{ 125{
125 u32 *wqe; 126 __be32 *wqe;
126 int i; 127 int i;
127 int s; 128 int s;
128 int ind; 129 int ind;
@@ -143,7 +144,7 @@ static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
143 buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); 144 buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
144 for (i = 64; i < s; i += 64) { 145 for (i = 64; i < s; i += 64) {
145 wqe = buf + i; 146 wqe = buf + i;
146 *wqe = 0xffffffff; 147 *wqe = cpu_to_be32(0xffffffff);
147 } 148 }
148 } 149 }
149} 150}
@@ -242,7 +243,7 @@ static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
242 } 243 }
243} 244}
244 245
245static int send_wqe_overhead(enum ib_qp_type type) 246static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
246{ 247{
247 /* 248 /*
248 * UD WQEs must have a datagram segment. 249 * UD WQEs must have a datagram segment.
@@ -253,7 +254,8 @@ static int send_wqe_overhead(enum ib_qp_type type)
253 switch (type) { 254 switch (type) {
254 case IB_QPT_UD: 255 case IB_QPT_UD:
255 return sizeof (struct mlx4_wqe_ctrl_seg) + 256 return sizeof (struct mlx4_wqe_ctrl_seg) +
256 sizeof (struct mlx4_wqe_datagram_seg); 257 sizeof (struct mlx4_wqe_datagram_seg) +
258 ((flags & MLX4_IB_QP_LSO) ? 64 : 0);
257 case IB_QPT_UC: 259 case IB_QPT_UC:
258 return sizeof (struct mlx4_wqe_ctrl_seg) + 260 return sizeof (struct mlx4_wqe_ctrl_seg) +
259 sizeof (struct mlx4_wqe_raddr_seg); 261 sizeof (struct mlx4_wqe_raddr_seg);
@@ -315,7 +317,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
315 /* Sanity check SQ size before proceeding */ 317 /* Sanity check SQ size before proceeding */
316 if (cap->max_send_wr > dev->dev->caps.max_wqes || 318 if (cap->max_send_wr > dev->dev->caps.max_wqes ||
317 cap->max_send_sge > dev->dev->caps.max_sq_sg || 319 cap->max_send_sge > dev->dev->caps.max_sq_sg ||
318 cap->max_inline_data + send_wqe_overhead(type) + 320 cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
319 sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) 321 sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
320 return -EINVAL; 322 return -EINVAL;
321 323
@@ -329,7 +331,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
329 331
330 s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg), 332 s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
331 cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) + 333 cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
332 send_wqe_overhead(type); 334 send_wqe_overhead(type, qp->flags);
333 335
334 /* 336 /*
335 * Hermon supports shrinking WQEs, such that a single work 337 * Hermon supports shrinking WQEs, such that a single work
@@ -394,7 +396,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
394 } 396 }
395 397
396 qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) - 398 qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) -
397 send_wqe_overhead(type)) / sizeof (struct mlx4_wqe_data_seg); 399 send_wqe_overhead(type, qp->flags)) /
400 sizeof (struct mlx4_wqe_data_seg);
398 401
399 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + 402 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
400 (qp->sq.wqe_cnt << qp->sq.wqe_shift); 403 (qp->sq.wqe_cnt << qp->sq.wqe_shift);
@@ -503,6 +506,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
503 } else { 506 } else {
504 qp->sq_no_prefetch = 0; 507 qp->sq_no_prefetch = 0;
505 508
509 if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
510 qp->flags |= MLX4_IB_QP_LSO;
511
506 err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp); 512 err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
507 if (err) 513 if (err)
508 goto err; 514 goto err;
@@ -673,6 +679,13 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
673 struct mlx4_ib_qp *qp; 679 struct mlx4_ib_qp *qp;
674 int err; 680 int err;
675 681
682 /* We only support LSO, and only for kernel UD QPs. */
683 if (init_attr->create_flags & ~IB_QP_CREATE_IPOIB_UD_LSO)
684 return ERR_PTR(-EINVAL);
685 if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO &&
686 (pd->uobject || init_attr->qp_type != IB_QPT_UD))
687 return ERR_PTR(-EINVAL);
688
676 switch (init_attr->qp_type) { 689 switch (init_attr->qp_type) {
677 case IB_QPT_RC: 690 case IB_QPT_RC:
678 case IB_QPT_UC: 691 case IB_QPT_UC:
@@ -876,10 +889,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
876 } 889 }
877 } 890 }
878 891
879 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI || 892 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
880 ibqp->qp_type == IB_QPT_UD)
881 context->mtu_msgmax = (IB_MTU_4096 << 5) | 11; 893 context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
882 else if (attr_mask & IB_QP_PATH_MTU) { 894 else if (ibqp->qp_type == IB_QPT_UD) {
895 if (qp->flags & MLX4_IB_QP_LSO)
896 context->mtu_msgmax = (IB_MTU_4096 << 5) |
897 ilog2(dev->dev->caps.max_gso_sz);
898 else
899 context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
900 } else if (attr_mask & IB_QP_PATH_MTU) {
883 if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) { 901 if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
884 printk(KERN_ERR "path MTU (%u) is invalid\n", 902 printk(KERN_ERR "path MTU (%u) is invalid\n",
885 attr->path_mtu); 903 attr->path_mtu);
@@ -1182,7 +1200,7 @@ out:
1182} 1200}
1183 1201
1184static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, 1202static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
1185 void *wqe) 1203 void *wqe, unsigned *mlx_seg_len)
1186{ 1204{
1187 struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev; 1205 struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev;
1188 struct mlx4_wqe_mlx_seg *mlx = wqe; 1206 struct mlx4_wqe_mlx_seg *mlx = wqe;
@@ -1231,7 +1249,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
1231 case IB_WR_SEND_WITH_IMM: 1249 case IB_WR_SEND_WITH_IMM:
1232 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; 1250 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1233 sqp->ud_header.immediate_present = 1; 1251 sqp->ud_header.immediate_present = 1;
1234 sqp->ud_header.immediate_data = wr->imm_data; 1252 sqp->ud_header.immediate_data = wr->ex.imm_data;
1235 break; 1253 break;
1236 default: 1254 default:
1237 return -EINVAL; 1255 return -EINVAL;
@@ -1303,7 +1321,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
1303 i = 2; 1321 i = 2;
1304 } 1322 }
1305 1323
1306 return ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16); 1324 *mlx_seg_len =
1325 ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
1326 return 0;
1307} 1327}
1308 1328
1309static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq) 1329static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
@@ -1396,6 +1416,34 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
1396 dseg->addr = cpu_to_be64(sg->addr); 1416 dseg->addr = cpu_to_be64(sg->addr);
1397} 1417}
1398 1418
1419static int build_lso_seg(struct mlx4_lso_seg *wqe, struct ib_send_wr *wr,
1420 struct mlx4_ib_qp *qp, unsigned *lso_seg_len)
1421{
1422 unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
1423
1424 /*
1425 * This is a temporary limitation and will be removed in
1426 * a forthcoming FW release:
1427 */
1428 if (unlikely(halign > 64))
1429 return -EINVAL;
1430
1431 if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
1432 wr->num_sge > qp->sq.max_gs - (halign >> 4)))
1433 return -EINVAL;
1434
1435 memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
1436
1437 /* make sure LSO header is written before overwriting stamping */
1438 wmb();
1439
1440 wqe->mss_hdr_size = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
1441 wr->wr.ud.hlen);
1442
1443 *lso_seg_len = halign;
1444 return 0;
1445}
1446
1399int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 1447int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1400 struct ib_send_wr **bad_wr) 1448 struct ib_send_wr **bad_wr)
1401{ 1449{
@@ -1409,6 +1457,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1409 unsigned ind; 1457 unsigned ind;
1410 int uninitialized_var(stamp); 1458 int uninitialized_var(stamp);
1411 int uninitialized_var(size); 1459 int uninitialized_var(size);
1460 unsigned seglen;
1412 int i; 1461 int i;
1413 1462
1414 spin_lock_irqsave(&qp->sq.lock, flags); 1463 spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1436,11 +1485,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1436 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) | 1485 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
1437 (wr->send_flags & IB_SEND_SOLICITED ? 1486 (wr->send_flags & IB_SEND_SOLICITED ?
1438 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) | 1487 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
1488 ((wr->send_flags & IB_SEND_IP_CSUM) ?
1489 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
1490 MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
1439 qp->sq_signal_bits; 1491 qp->sq_signal_bits;
1440 1492
1441 if (wr->opcode == IB_WR_SEND_WITH_IMM || 1493 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
1442 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 1494 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1443 ctrl->imm = wr->imm_data; 1495 ctrl->imm = wr->ex.imm_data;
1444 else 1496 else
1445 ctrl->imm = 0; 1497 ctrl->imm = 0;
1446 1498
@@ -1484,19 +1536,27 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1484 set_datagram_seg(wqe, wr); 1536 set_datagram_seg(wqe, wr);
1485 wqe += sizeof (struct mlx4_wqe_datagram_seg); 1537 wqe += sizeof (struct mlx4_wqe_datagram_seg);
1486 size += sizeof (struct mlx4_wqe_datagram_seg) / 16; 1538 size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
1539
1540 if (wr->opcode == IB_WR_LSO) {
1541 err = build_lso_seg(wqe, wr, qp, &seglen);
1542 if (unlikely(err)) {
1543 *bad_wr = wr;
1544 goto out;
1545 }
1546 wqe += seglen;
1547 size += seglen / 16;
1548 }
1487 break; 1549 break;
1488 1550
1489 case IB_QPT_SMI: 1551 case IB_QPT_SMI:
1490 case IB_QPT_GSI: 1552 case IB_QPT_GSI:
1491 err = build_mlx_header(to_msqp(qp), wr, ctrl); 1553 err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
1492 if (err < 0) { 1554 if (unlikely(err)) {
1493 *bad_wr = wr; 1555 *bad_wr = wr;
1494 goto out; 1556 goto out;
1495 } 1557 }
1496 wqe += err; 1558 wqe += seglen;
1497 size += err / 16; 1559 size += seglen / 16;
1498
1499 err = 0;
1500 break; 1560 break;
1501 1561
1502 default: 1562 default:
@@ -1725,7 +1785,9 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
1725 struct mlx4_ib_qp *qp = to_mqp(ibqp); 1785 struct mlx4_ib_qp *qp = to_mqp(ibqp);
1726 struct mlx4_qp_context context; 1786 struct mlx4_qp_context context;
1727 int mlx4_state; 1787 int mlx4_state;
1728 int err; 1788 int err = 0;
1789
1790 mutex_lock(&qp->mutex);
1729 1791
1730 if (qp->state == IB_QPS_RESET) { 1792 if (qp->state == IB_QPS_RESET) {
1731 qp_attr->qp_state = IB_QPS_RESET; 1793 qp_attr->qp_state = IB_QPS_RESET;
@@ -1733,12 +1795,15 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
1733 } 1795 }
1734 1796
1735 err = mlx4_qp_query(dev->dev, &qp->mqp, &context); 1797 err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
1736 if (err) 1798 if (err) {
1737 return -EINVAL; 1799 err = -EINVAL;
1800 goto out;
1801 }
1738 1802
1739 mlx4_state = be32_to_cpu(context.flags) >> 28; 1803 mlx4_state = be32_to_cpu(context.flags) >> 28;
1740 1804
1741 qp_attr->qp_state = to_ib_qp_state(mlx4_state); 1805 qp->state = to_ib_qp_state(mlx4_state);
1806 qp_attr->qp_state = qp->state;
1742 qp_attr->path_mtu = context.mtu_msgmax >> 5; 1807 qp_attr->path_mtu = context.mtu_msgmax >> 5;
1743 qp_attr->path_mig_state = 1808 qp_attr->path_mig_state =
1744 to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3); 1809 to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
@@ -1797,6 +1862,8 @@ done:
1797 1862
1798 qp_init_attr->cap = qp_attr->cap; 1863 qp_init_attr->cap = qp_attr->cap;
1799 1864
1800 return 0; 1865out:
1866 mutex_unlock(&qp->mutex);
1867 return err;
1801} 1868}
1802 1869
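Taken together, the qp.c changes fix the shape of an LSO post: the QP must have been created with IB_QP_CREATE_IPOIB_UD_LSO, the packet headers travel out of line through wr.ud and are copied inline into the WQE by build_lso_seg(), and the gather list carries payload only. A hedged sketch of a caller (all variable values illustrative):

	struct ib_sge sge = {
		.addr   = payload_dma,
		.length = payload_len,
		.lkey   = lkey,
	};
	struct ib_send_wr wr = {
		.opcode     = IB_WR_LSO,
		.sg_list    = &sge,
		.num_sge    = 1,
		.send_flags = IB_SEND_SIGNALED,
	}, *bad_wr;

	wr.wr.ud.ah          = ah;
	wr.wr.ud.remote_qpn  = qpn;
	wr.wr.ud.remote_qkey = qkey;
	wr.wr.ud.header      = hdr;   /* copied inline by build_lso_seg()           */
	wr.wr.ud.hlen        = hlen;  /* ALIGN(seg + hlen, 16) must stay <= 64 (FW) */
	wr.wr.ud.mss         = mss;   /* includes hlen; WQE stores (mss - hlen) and hlen */

	err = ib_post_send(ibqp, &wr, &bad_wr);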
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 09a30dd12b14..54d230ee7d63 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -219,7 +219,7 @@ static void mthca_cmd_post_dbell(struct mthca_dev *dev,
219 __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) | 219 __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
220 (1 << HCA_E_BIT) | 220 (1 << HCA_E_BIT) |
221 (op_modifier << HCR_OPMOD_SHIFT) | 221 (op_modifier << HCR_OPMOD_SHIFT) |
222 op), ptr + offs[6]); 222 op), ptr + offs[6]);
223 wmb(); 223 wmb();
224 __raw_writel((__force u32) 0, ptr + offs[7]); 224 __raw_writel((__force u32) 0, ptr + offs[7]);
225 wmb(); 225 wmb();
@@ -1339,6 +1339,10 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
1339 /* Check port for UD address vector: */ 1339 /* Check port for UD address vector: */
1340 *(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(1); 1340 *(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(1);
1341 1341
1342 /* Enable IPoIB checksumming if we can: */
1343 if (dev->device_cap_flags & IB_DEVICE_UD_IP_CSUM)
1344 *(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(7 << 3);
1345
1342 /* We leave wqe_quota, responder_exu, etc as 0 (default) */ 1346 /* We leave wqe_quota, responder_exu, etc as 0 (default) */
1343 1347
1344 /* QPC/EEC/CQC/EQC/RDB attributes */ 1348 /* QPC/EEC/CQC/EQC/RDB attributes */
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index 2f976f2051d6..8928ca4a9325 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -103,6 +103,7 @@ enum {
103 DEV_LIM_FLAG_RAW_IPV6 = 1 << 4, 103 DEV_LIM_FLAG_RAW_IPV6 = 1 << 4,
104 DEV_LIM_FLAG_RAW_ETHER = 1 << 5, 104 DEV_LIM_FLAG_RAW_ETHER = 1 << 5,
105 DEV_LIM_FLAG_SRQ = 1 << 6, 105 DEV_LIM_FLAG_SRQ = 1 << 6,
106 DEV_LIM_FLAG_IPOIB_CSUM = 1 << 7,
106 DEV_LIM_FLAG_BAD_PKEY_CNTR = 1 << 8, 107 DEV_LIM_FLAG_BAD_PKEY_CNTR = 1 << 8,
107 DEV_LIM_FLAG_BAD_QKEY_CNTR = 1 << 9, 108 DEV_LIM_FLAG_BAD_QKEY_CNTR = 1 << 9,
108 DEV_LIM_FLAG_MW = 1 << 16, 109 DEV_LIM_FLAG_MW = 1 << 16,
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 1e1e336d3ef9..20401d2ba6b2 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -119,7 +119,8 @@ struct mthca_cqe {
119 __be32 my_qpn; 119 __be32 my_qpn;
120 __be32 my_ee; 120 __be32 my_ee;
121 __be32 rqpn; 121 __be32 rqpn;
122 __be16 sl_g_mlpath; 122 u8 sl_ipok;
123 u8 g_mlpath;
123 __be16 rlid; 124 __be16 rlid;
124 __be32 imm_etype_pkey_eec; 125 __be32 imm_etype_pkey_eec;
125 __be32 byte_cnt; 126 __be32 byte_cnt;
@@ -493,6 +494,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
493 int is_send; 494 int is_send;
494 int free_cqe = 1; 495 int free_cqe = 1;
495 int err = 0; 496 int err = 0;
497 u16 checksum;
496 498
497 cqe = next_cqe_sw(cq); 499 cqe = next_cqe_sw(cq);
498 if (!cqe) 500 if (!cqe)
@@ -635,12 +637,14 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
635 break; 637 break;
636 } 638 }
637 entry->slid = be16_to_cpu(cqe->rlid); 639 entry->slid = be16_to_cpu(cqe->rlid);
638 entry->sl = be16_to_cpu(cqe->sl_g_mlpath) >> 12; 640 entry->sl = cqe->sl_ipok >> 4;
639 entry->src_qp = be32_to_cpu(cqe->rqpn) & 0xffffff; 641 entry->src_qp = be32_to_cpu(cqe->rqpn) & 0xffffff;
640 entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f; 642 entry->dlid_path_bits = cqe->g_mlpath & 0x7f;
641 entry->pkey_index = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16; 643 entry->pkey_index = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
642 entry->wc_flags |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ? 644 entry->wc_flags |= cqe->g_mlpath & 0x80 ? IB_WC_GRH : 0;
643 IB_WC_GRH : 0; 645 checksum = (be32_to_cpu(cqe->rqpn) >> 24) |
646 ((be32_to_cpu(cqe->my_ee) >> 16) & 0xff00);
647 entry->csum_ok = (cqe->sl_ipok & 1 && checksum == 0xffff);
644 } 648 }
645 649
646 entry->status = IB_WC_SUCCESS; 650 entry->status = IB_WC_SUCCESS;
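The layout change above splits the 16-bit transport checksum across the CQE: its low byte arrives in the top byte of rqpn and its high byte in the top byte of my_ee, so poll_one stitches the two back together and declares the packet good only when bit 0 of sl_ipok (the hardware's IP-ok indication) is set and the ones' complement sum is 0xffff. The reassembly, restated on its own (helper name ours):

	/* Sketch: rebuild the checksum exactly as the hunk above does. */
	static u16 cqe_checksum(struct mthca_cqe *cqe)
	{
		return (be32_to_cpu(cqe->rqpn) >> 24) |              /* low byte  */
		       ((be32_to_cpu(cqe->my_ee) >> 16) & 0xff00);   /* high byte */
	}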
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 7bbdd1f4e6c7..0e842e023400 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -54,8 +54,8 @@
54 54
55#define DRV_NAME "ib_mthca" 55#define DRV_NAME "ib_mthca"
56#define PFX DRV_NAME ": " 56#define PFX DRV_NAME ": "
57#define DRV_VERSION "0.08" 57#define DRV_VERSION "1.0"
58#define DRV_RELDATE "February 14, 2006" 58#define DRV_RELDATE "April 4, 2008"
59 59
60enum { 60enum {
61 MTHCA_FLAG_DDR_HIDDEN = 1 << 1, 61 MTHCA_FLAG_DDR_HIDDEN = 1 << 1,
@@ -390,11 +390,11 @@ extern void __buggy_use_of_MTHCA_PUT(void);
390 do { \ 390 do { \
391 void *__p = (char *) (source) + (offset); \ 391 void *__p = (char *) (source) + (offset); \
392 switch (sizeof (dest)) { \ 392 switch (sizeof (dest)) { \
393 case 1: (dest) = *(u8 *) __p; break; \ 393 case 1: (dest) = *(u8 *) __p; break; \
394 case 2: (dest) = be16_to_cpup(__p); break; \ 394 case 2: (dest) = be16_to_cpup(__p); break; \
395 case 4: (dest) = be32_to_cpup(__p); break; \ 395 case 4: (dest) = be32_to_cpup(__p); break; \
396 case 8: (dest) = be64_to_cpup(__p); break; \ 396 case 8: (dest) = be64_to_cpup(__p); break; \
397 default: __buggy_use_of_MTHCA_GET(); \ 397 default: __buggy_use_of_MTHCA_GET(); \
398 } \ 398 } \
399 } while (0) 399 } while (0)
400 400
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index b60eb5df96e8..8bde7f98e58a 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -232,9 +232,9 @@ static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
232 return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE; 232 return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
233} 233}
234 234
235static inline struct mthca_eqe* next_eqe_sw(struct mthca_eq *eq) 235static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
236{ 236{
237 struct mthca_eqe* eqe; 237 struct mthca_eqe *eqe;
238 eqe = get_eqe(eq, eq->cons_index); 238 eqe = get_eqe(eq, eq->cons_index);
239 return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe; 239 return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
240} 240}
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index acfa41d968ee..8b7e83e6e88f 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -125,7 +125,7 @@ static void smp_snoop(struct ib_device *ibdev,
125 event.device = ibdev; 125 event.device = ibdev;
126 event.element.port_num = port_num; 126 event.element.port_num = port_num;
127 127
128 if(pinfo->clientrereg_resv_subnetto & 0x80) 128 if (pinfo->clientrereg_resv_subnetto & 0x80)
129 event.event = IB_EVENT_CLIENT_REREGISTER; 129 event.event = IB_EVENT_CLIENT_REREGISTER;
130 else 130 else
131 event.event = IB_EVENT_LID_CHANGE; 131 event.event = IB_EVENT_LID_CHANGE;
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index cd3d8adbef9f..9ebadd6e0cfb 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -267,11 +267,16 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
267 if (dev_lim->flags & DEV_LIM_FLAG_SRQ) 267 if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
268 mdev->mthca_flags |= MTHCA_FLAG_SRQ; 268 mdev->mthca_flags |= MTHCA_FLAG_SRQ;
269 269
270 if (mthca_is_memfree(mdev))
271 if (dev_lim->flags & DEV_LIM_FLAG_IPOIB_CSUM)
272 mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
273
270 return 0; 274 return 0;
271} 275}
272 276
273static int mthca_init_tavor(struct mthca_dev *mdev) 277static int mthca_init_tavor(struct mthca_dev *mdev)
274{ 278{
279 s64 size;
275 u8 status; 280 u8 status;
276 int err; 281 int err;
277 struct mthca_dev_lim dev_lim; 282 struct mthca_dev_lim dev_lim;
@@ -324,9 +329,11 @@ static int mthca_init_tavor(struct mthca_dev *mdev)
324 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) 329 if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
325 profile.num_srq = dev_lim.max_srqs; 330 profile.num_srq = dev_lim.max_srqs;
326 331
327 err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); 332 size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
328 if (err < 0) 333 if (size < 0) {
334 err = size;
329 goto err_disable; 335 goto err_disable;
336 }
330 337
331 err = mthca_INIT_HCA(mdev, &init_hca, &status); 338 err = mthca_INIT_HCA(mdev, &init_hca, &status);
332 if (err) { 339 if (err) {
@@ -605,7 +612,7 @@ static int mthca_init_arbel(struct mthca_dev *mdev)
605 struct mthca_dev_lim dev_lim; 612 struct mthca_dev_lim dev_lim;
606 struct mthca_profile profile; 613 struct mthca_profile profile;
607 struct mthca_init_hca_param init_hca; 614 struct mthca_init_hca_param init_hca;
608 u64 icm_size; 615 s64 icm_size;
609 u8 status; 616 u8 status;
610 int err; 617 int err;
611 618
@@ -653,7 +660,7 @@ static int mthca_init_arbel(struct mthca_dev *mdev)
653 profile.num_srq = dev_lim.max_srqs; 660 profile.num_srq = dev_lim.max_srqs;
654 661
655 icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); 662 icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
656 if ((int) icm_size < 0) { 663 if (icm_size < 0) {
657 err = icm_size; 664 err = icm_size;
658 goto err_stop_fw; 665 goto err_stop_fw;
659 } 666 }
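The u64 to s64 switch is not cosmetic: mthca_make_profile() can return an ICM size of 2 GB or more, and the old (int) icm_size < 0 test truncates such a value to a negative int, rejecting a perfectly valid profile. A tiny illustration (the size is made up):

	u64 icm_size = 0x90000000ULL;   /* 2.25 GB of ICM: a valid size, not an error */

	/* old test: (int) icm_size < 0  evaluates true  -> spurious failure */
	/* new test: (s64) icm_size < 0  evaluates false -> size accepted    */
	/* a real errno such as (u64) -ENOMEM stays negative in 64 bits, so  */
	/* genuine failures are still caught                                 */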
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 252db0822f6c..b224079d4e1f 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -359,12 +359,14 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
359 int use_lowmem, int use_coherent) 359 int use_lowmem, int use_coherent)
360{ 360{
361 struct mthca_icm_table *table; 361 struct mthca_icm_table *table;
362 int obj_per_chunk;
362 int num_icm; 363 int num_icm;
363 unsigned chunk_size; 364 unsigned chunk_size;
364 int i; 365 int i;
365 u8 status; 366 u8 status;
366 367
367 num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE - 1) / MTHCA_TABLE_CHUNK_SIZE; 368 obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
369 num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
368 370
369 table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL); 371 table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
370 if (!table) 372 if (!table)
@@ -412,7 +414,7 @@ err:
412 if (table->icm[i]) { 414 if (table->icm[i]) {
413 mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE, 415 mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
414 MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE, 416 MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
415 &status); 417 &status);
416 mthca_free_icm(dev, table->icm[i], table->coherent); 418 mthca_free_icm(dev, table->icm[i], table->coherent);
417 } 419 }
418 420
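Beyond readability, the per-chunk rewrite above sidesteps 32-bit overflow: obj_size and nobj are ints, so the old obj_size * nobj product can wrap once a table reaches 4 GB, while the new form never multiplies the two. A worked example, assuming a 256 KB chunk (the value of MTHCA_TABLE_CHUNK_SIZE is not shown in this hunk):

	/* obj_size = 256, nobj = 1 << 24: a 4 GB table                 */
	/* old: 256 * (1 << 24) = 1 << 32 overflows a 32-bit int to 0   */
	/*      num_icm = (0 + 262143) / 262144 = 0 chunks              */
	/* new: obj_per_chunk = 262144 / 256 = 1024                     */
	/*      num_icm = DIV_ROUND_UP(1 << 24, 1024) = 16384 chunks    */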
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index 26bf86d1cfcd..605a8d57fac6 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -63,7 +63,7 @@ enum {
63 MTHCA_NUM_PDS = 1 << 15 63 MTHCA_NUM_PDS = 1 << 15
64}; 64};
65 65
66u64 mthca_make_profile(struct mthca_dev *dev, 66s64 mthca_make_profile(struct mthca_dev *dev,
67 struct mthca_profile *request, 67 struct mthca_profile *request,
68 struct mthca_dev_lim *dev_lim, 68 struct mthca_dev_lim *dev_lim,
69 struct mthca_init_hca_param *init_hca) 69 struct mthca_init_hca_param *init_hca)
@@ -77,7 +77,7 @@ u64 mthca_make_profile(struct mthca_dev *dev,
77 }; 77 };
78 78
79 u64 mem_base, mem_avail; 79 u64 mem_base, mem_avail;
80 u64 total_size = 0; 80 s64 total_size = 0;
81 struct mthca_resource *profile; 81 struct mthca_resource *profile;
82 struct mthca_resource tmp; 82 struct mthca_resource tmp;
83 int i, j; 83 int i, j;
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.h b/drivers/infiniband/hw/mthca/mthca_profile.h
index 94641808f97f..e76cb62d8e32 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.h
+++ b/drivers/infiniband/hw/mthca/mthca_profile.h
@@ -53,7 +53,7 @@ struct mthca_profile {
53 int fmr_reserved_mtts; 53 int fmr_reserved_mtts;
54}; 54};
55 55
56u64 mthca_make_profile(struct mthca_dev *mdev, 56s64 mthca_make_profile(struct mthca_dev *mdev,
57 struct mthca_profile *request, 57 struct mthca_profile *request,
58 struct mthca_dev_lim *dev_lim, 58 struct mthca_dev_lim *dev_lim,
59 struct mthca_init_hca_param *init_hca); 59 struct mthca_init_hca_param *init_hca);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 9e491df6419c..81b257e18bb6 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -60,7 +60,7 @@ static int mthca_query_device(struct ib_device *ibdev,
60 struct ib_smp *in_mad = NULL; 60 struct ib_smp *in_mad = NULL;
61 struct ib_smp *out_mad = NULL; 61 struct ib_smp *out_mad = NULL;
62 int err = -ENOMEM; 62 int err = -ENOMEM;
63 struct mthca_dev* mdev = to_mdev(ibdev); 63 struct mthca_dev *mdev = to_mdev(ibdev);
64 64
65 u8 status; 65 u8 status;
66 66
@@ -540,6 +540,9 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
540 struct mthca_qp *qp; 540 struct mthca_qp *qp;
541 int err; 541 int err;
542 542
543 if (init_attr->create_flags)
544 return ERR_PTR(-EINVAL);
545
543 switch (init_attr->qp_type) { 546 switch (init_attr->qp_type) {
544 case IB_QPT_RC: 547 case IB_QPT_RC:
545 case IB_QPT_UC: 548 case IB_QPT_UC:
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index db5595bbf7f0..09dc3614cf2c 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -437,29 +437,34 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
437 int mthca_state; 437 int mthca_state;
438 u8 status; 438 u8 status;
439 439
440 mutex_lock(&qp->mutex);
441
440 if (qp->state == IB_QPS_RESET) { 442 if (qp->state == IB_QPS_RESET) {
441 qp_attr->qp_state = IB_QPS_RESET; 443 qp_attr->qp_state = IB_QPS_RESET;
442 goto done; 444 goto done;
443 } 445 }
444 446
445 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 447 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
446 if (IS_ERR(mailbox)) 448 if (IS_ERR(mailbox)) {
447 return PTR_ERR(mailbox); 449 err = PTR_ERR(mailbox);
450 goto out;
451 }
448 452
449 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status); 453 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
450 if (err) 454 if (err)
451 goto out; 455 goto out_mailbox;
452 if (status) { 456 if (status) {
453 mthca_warn(dev, "QUERY_QP returned status %02x\n", status); 457 mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
454 err = -EINVAL; 458 err = -EINVAL;
455 goto out; 459 goto out_mailbox;
456 } 460 }
457 461
458 qp_param = mailbox->buf; 462 qp_param = mailbox->buf;
459 context = &qp_param->context; 463 context = &qp_param->context;
460 mthca_state = be32_to_cpu(context->flags) >> 28; 464 mthca_state = be32_to_cpu(context->flags) >> 28;
461 465
462 qp_attr->qp_state = to_ib_qp_state(mthca_state); 466 qp->state = to_ib_qp_state(mthca_state);
467 qp_attr->qp_state = qp->state;
463 qp_attr->path_mtu = context->mtu_msgmax >> 5; 468 qp_attr->path_mtu = context->mtu_msgmax >> 5;
464 qp_attr->path_mig_state = 469 qp_attr->path_mig_state =
465 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); 470 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
@@ -506,8 +511,11 @@ done:
506 511
507 qp_init_attr->cap = qp_attr->cap; 512 qp_init_attr->cap = qp_attr->cap;
508 513
509out: 514out_mailbox:
510 mthca_free_mailbox(dev, mailbox); 515 mthca_free_mailbox(dev, mailbox);
516
517out:
518 mutex_unlock(&qp->mutex);
511 return err; 519 return err;
512} 520}
513 521
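This conversion and the mlx4 one earlier in the patch follow the same pattern: hold qp->mutex across the whole query and refresh the cached qp->state from the hardware context instead of only filling qp_attr. The cache is what the IB_QPS_RESET fast path at the top of the function trusts without touching hardware, and the mutex (presumably the same one taken on the modify path) keeps that cache coherent. In outline (hw_state is illustrative):

	mutex_lock(&qp->mutex);
	if (qp->state == IB_QPS_RESET) {          /* trusted cached state */
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}
	qp->state = to_ib_qp_state(hw_state);     /* refresh the cache    */
	qp_attr->qp_state = qp->state;
	done:
	mutex_unlock(&qp->mutex);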
@@ -1532,7 +1540,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1532 case IB_WR_SEND_WITH_IMM: 1540 case IB_WR_SEND_WITH_IMM:
1533 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; 1541 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1534 sqp->ud_header.immediate_present = 1; 1542 sqp->ud_header.immediate_present = 1;
1535 sqp->ud_header.immediate_data = wr->imm_data; 1543 sqp->ud_header.immediate_data = wr->ex.imm_data;
1536 break; 1544 break;
1537 default: 1545 default:
1538 return -EINVAL; 1546 return -EINVAL;
@@ -1679,7 +1687,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1679 cpu_to_be32(1); 1687 cpu_to_be32(1);
1680 if (wr->opcode == IB_WR_SEND_WITH_IMM || 1688 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
1681 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 1689 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1682 ((struct mthca_next_seg *) wqe)->imm = wr->imm_data; 1690 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
1683 1691
1684 wqe += sizeof (struct mthca_next_seg); 1692 wqe += sizeof (struct mthca_next_seg);
1685 size = sizeof (struct mthca_next_seg) / 16; 1693 size = sizeof (struct mthca_next_seg) / 16;
@@ -2015,10 +2023,12 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2015 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | 2023 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
2016 ((wr->send_flags & IB_SEND_SOLICITED) ? 2024 ((wr->send_flags & IB_SEND_SOLICITED) ?
2017 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | 2025 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) |
2026 ((wr->send_flags & IB_SEND_IP_CSUM) ?
2027 cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) |
2018 cpu_to_be32(1); 2028 cpu_to_be32(1);
2019 if (wr->opcode == IB_WR_SEND_WITH_IMM || 2029 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
2020 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 2030 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
2021 ((struct mthca_next_seg *) wqe)->imm = wr->imm_data; 2031 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
2022 2032
2023 wqe += sizeof (struct mthca_next_seg); 2033 wqe += sizeof (struct mthca_next_seg);
2024 size = sizeof (struct mthca_next_seg) / 16; 2034 size = sizeof (struct mthca_next_seg) / 16;
diff --git a/drivers/infiniband/hw/mthca/mthca_wqe.h b/drivers/infiniband/hw/mthca/mthca_wqe.h
index f6a66fe78e48..b3551a8dea1d 100644
--- a/drivers/infiniband/hw/mthca/mthca_wqe.h
+++ b/drivers/infiniband/hw/mthca/mthca_wqe.h
@@ -38,14 +38,16 @@
38#include <linux/types.h> 38#include <linux/types.h>
39 39
40enum { 40enum {
41 MTHCA_NEXT_DBD = 1 << 7, 41 MTHCA_NEXT_DBD = 1 << 7,
42 MTHCA_NEXT_FENCE = 1 << 6, 42 MTHCA_NEXT_FENCE = 1 << 6,
43 MTHCA_NEXT_CQ_UPDATE = 1 << 3, 43 MTHCA_NEXT_CQ_UPDATE = 1 << 3,
44 MTHCA_NEXT_EVENT_GEN = 1 << 2, 44 MTHCA_NEXT_EVENT_GEN = 1 << 2,
45 MTHCA_NEXT_SOLICIT = 1 << 1, 45 MTHCA_NEXT_SOLICIT = 1 << 1,
46 46 MTHCA_NEXT_IP_CSUM = 1 << 4,
47 MTHCA_MLX_VL15 = 1 << 17, 47 MTHCA_NEXT_TCP_UDP_CSUM = 1 << 5,
48 MTHCA_MLX_SLR = 1 << 16 48
49 MTHCA_MLX_VL15 = 1 << 17,
50 MTHCA_MLX_SLR = 1 << 16
49}; 51};
50 52
51enum { 53enum {
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b2112f5a422f..b00b0e3a91dc 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -65,7 +65,6 @@ MODULE_LICENSE("Dual BSD/GPL");
65MODULE_VERSION(DRV_VERSION); 65MODULE_VERSION(DRV_VERSION);
66 66
67int max_mtu = 9000; 67int max_mtu = 9000;
68int nics_per_function = 1;
69int interrupt_mod_interval = 0; 68int interrupt_mod_interval = 0;
70 69
71 70
@@ -93,15 +92,9 @@ module_param_named(debug_level, nes_debug_level, uint, 0644);
93MODULE_PARM_DESC(debug_level, "Enable debug output level"); 92MODULE_PARM_DESC(debug_level, "Enable debug output level");
94 93
95LIST_HEAD(nes_adapter_list); 94LIST_HEAD(nes_adapter_list);
96LIST_HEAD(nes_dev_list); 95static LIST_HEAD(nes_dev_list);
97 96
98atomic_t qps_destroyed; 97atomic_t qps_destroyed;
99atomic_t cqp_reqs_allocated;
100atomic_t cqp_reqs_freed;
101atomic_t cqp_reqs_dynallocated;
102atomic_t cqp_reqs_dynfreed;
103atomic_t cqp_reqs_queued;
104atomic_t cqp_reqs_redriven;
105 98
106static void nes_print_macaddr(struct net_device *netdev); 99static void nes_print_macaddr(struct net_device *netdev);
107static irqreturn_t nes_interrupt(int, void *); 100static irqreturn_t nes_interrupt(int, void *);
@@ -310,7 +303,7 @@ void nes_rem_ref(struct ib_qp *ibqp)
310 303
311 if (atomic_read(&nesqp->refcount) == 0) { 304 if (atomic_read(&nesqp->refcount) == 0) {
312 printk(KERN_INFO PFX "%s: Reference count already 0 for QP%d, last aeq = 0x%04X.\n", 305 printk(KERN_INFO PFX "%s: Reference count already 0 for QP%d, last aeq = 0x%04X.\n",
313 __FUNCTION__, ibqp->qp_num, nesqp->last_aeq); 306 __func__, ibqp->qp_num, nesqp->last_aeq);
314 BUG(); 307 BUG();
315 } 308 }
316 309
@@ -751,13 +744,13 @@ static void __devexit nes_remove(struct pci_dev *pcidev)
751 744
752 list_del(&nesdev->list); 745 list_del(&nesdev->list);
753 nes_destroy_cqp(nesdev); 746 nes_destroy_cqp(nesdev);
747
748 free_irq(pcidev->irq, nesdev);
754 tasklet_kill(&nesdev->dpc_tasklet); 749 tasklet_kill(&nesdev->dpc_tasklet);
755 750
756 /* Deallocate the Adapter Structure */ 751 /* Deallocate the Adapter Structure */
757 nes_destroy_adapter(nesdev->nesadapter); 752 nes_destroy_adapter(nesdev->nesadapter);
758 753
759 free_irq(pcidev->irq, nesdev);
760
761 if (nesdev->msi_enabled) { 754 if (nesdev->msi_enabled) {
762 pci_disable_msi(pcidev); 755 pci_disable_msi(pcidev);
763 } 756 }
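The teardown reorder in nes_remove() is about interrupt and tasklet lifetime: once free_irq() returns, the ISR can no longer run, so nothing can call tasklet_schedule() and resurrect the DPC tasklet after tasklet_kill(). The resulting order:

	nes_destroy_cqp(nesdev);
	free_irq(pcidev->irq, nesdev);            /* ISR quiesced first            */
	tasklet_kill(&nesdev->dpc_tasklet);       /* now it cannot be rescheduled  */
	nes_destroy_adapter(nesdev->nesadapter);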
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index a48b288618ec..1626124a156d 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -143,12 +143,12 @@
143#ifdef CONFIG_INFINIBAND_NES_DEBUG 143#ifdef CONFIG_INFINIBAND_NES_DEBUG
144#define nes_debug(level, fmt, args...) \ 144#define nes_debug(level, fmt, args...) \
145 if (level & nes_debug_level) \ 145 if (level & nes_debug_level) \
146 printk(KERN_ERR PFX "%s[%u]: " fmt, __FUNCTION__, __LINE__, ##args) 146 printk(KERN_ERR PFX "%s[%u]: " fmt, __func__, __LINE__, ##args)
147 147
148#define assert(expr) \ 148#define assert(expr) \
149if (!(expr)) { \ 149if (!(expr)) { \
150 printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \ 150 printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \
151 #expr, __FILE__, __FUNCTION__, __LINE__); \ 151 #expr, __FILE__, __func__, __LINE__); \
152} 152}
153 153
154#define NES_EVENT_TIMEOUT 1200000 154#define NES_EVENT_TIMEOUT 1200000
@@ -166,7 +166,6 @@ if (!(expr)) { \
166#include "nes_cm.h" 166#include "nes_cm.h"
167 167
168extern int max_mtu; 168extern int max_mtu;
169extern int nics_per_function;
170#define max_frame_len (max_mtu+ETH_HLEN) 169#define max_frame_len (max_mtu+ETH_HLEN)
171extern int interrupt_mod_interval; 170extern int interrupt_mod_interval;
172extern int nes_if_count; 171extern int nes_if_count;
@@ -177,9 +176,6 @@ extern unsigned int nes_drv_opt;
177extern unsigned int nes_debug_level; 176extern unsigned int nes_debug_level;
178 177
179extern struct list_head nes_adapter_list; 178extern struct list_head nes_adapter_list;
180extern struct list_head nes_dev_list;
181
182extern struct nes_cm_core *g_cm_core;
183 179
184extern atomic_t cm_connects; 180extern atomic_t cm_connects;
185extern atomic_t cm_accepts; 181extern atomic_t cm_accepts;
@@ -209,7 +205,6 @@ extern atomic_t cm_nodes_destroyed;
209extern atomic_t cm_accel_dropped_pkts; 205extern atomic_t cm_accel_dropped_pkts;
210extern atomic_t cm_resets_recvd; 206extern atomic_t cm_resets_recvd;
211 207
212extern u32 crit_err_count;
213extern u32 int_mod_timer_init; 208extern u32 int_mod_timer_init;
214extern u32 int_mod_cq_depth_256; 209extern u32 int_mod_cq_depth_256;
215extern u32 int_mod_cq_depth_128; 210extern u32 int_mod_cq_depth_128;
@@ -219,14 +214,6 @@ extern u32 int_mod_cq_depth_16;
219extern u32 int_mod_cq_depth_4; 214extern u32 int_mod_cq_depth_4;
220extern u32 int_mod_cq_depth_1; 215extern u32 int_mod_cq_depth_1;
221 216
222extern atomic_t cqp_reqs_allocated;
223extern atomic_t cqp_reqs_freed;
224extern atomic_t cqp_reqs_dynallocated;
225extern atomic_t cqp_reqs_dynfreed;
226extern atomic_t cqp_reqs_queued;
227extern atomic_t cqp_reqs_redriven;
228
229
230struct nes_device { 217struct nes_device {
231 struct nes_adapter *nesadapter; 218 struct nes_adapter *nesadapter;
232 void __iomem *regs; 219 void __iomem *regs;
@@ -412,7 +399,7 @@ static inline int nes_alloc_resource(struct nes_adapter *nesadapter,
412 if (resource_num >= max_resources) { 399 if (resource_num >= max_resources) {
413 resource_num = find_first_zero_bit(resource_array, max_resources); 400 resource_num = find_first_zero_bit(resource_array, max_resources);
414 if (resource_num >= max_resources) { 401 if (resource_num >= max_resources) {
415 printk(KERN_ERR PFX "%s: No available resources.\n", __FUNCTION__); 402 printk(KERN_ERR PFX "%s: No available resources.\n", __func__);
416 spin_unlock_irqrestore(&nesadapter->resource_lock, flags); 403 spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
417 return -EMFILE; 404 return -EMFILE;
418 } 405 }
@@ -510,9 +497,6 @@ struct ib_qp *nes_get_qp(struct ib_device *, int);
510/* nes_hw.c */ 497/* nes_hw.c */
511struct nes_adapter *nes_init_adapter(struct nes_device *, u8); 498struct nes_adapter *nes_init_adapter(struct nes_device *, u8);
512void nes_nic_init_timer_defaults(struct nes_device *, u8); 499void nes_nic_init_timer_defaults(struct nes_device *, u8);
513unsigned int nes_reset_adapter_ne020(struct nes_device *, u8 *);
514int nes_init_serdes(struct nes_device *, u8, u8, u8);
515void nes_init_csr_ne020(struct nes_device *, u8, u8);
516void nes_destroy_adapter(struct nes_adapter *); 500void nes_destroy_adapter(struct nes_adapter *);
517int nes_init_cqp(struct nes_device *); 501int nes_init_cqp(struct nes_device *);
518int nes_init_phy(struct nes_device *); 502int nes_init_phy(struct nes_device *);
@@ -520,20 +504,12 @@ int nes_init_nic_qp(struct nes_device *, struct net_device *);
520void nes_destroy_nic_qp(struct nes_vnic *); 504void nes_destroy_nic_qp(struct nes_vnic *);
521int nes_napi_isr(struct nes_device *); 505int nes_napi_isr(struct nes_device *);
522void nes_dpc(unsigned long); 506void nes_dpc(unsigned long);
523void nes_process_ceq(struct nes_device *, struct nes_hw_ceq *);
524void nes_process_aeq(struct nes_device *, struct nes_hw_aeq *);
525void nes_process_mac_intr(struct nes_device *, u32);
526void nes_nic_napi_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
527void nes_nic_ce_handler(struct nes_device *, struct nes_hw_nic_cq *); 507void nes_nic_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
528void nes_cqp_ce_handler(struct nes_device *, struct nes_hw_cq *);
529void nes_process_iwarp_aeqe(struct nes_device *, struct nes_hw_aeqe *);
530void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *); 508void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
531int nes_destroy_cqp(struct nes_device *); 509int nes_destroy_cqp(struct nes_device *);
532int nes_nic_cm_xmit(struct sk_buff *, struct net_device *); 510int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
533 511
534/* nes_nic.c */ 512/* nes_nic.c */
535void nes_netdev_set_multicast_list(struct net_device *);
536void nes_netdev_exit(struct nes_vnic *);
537struct net_device *nes_netdev_init(struct nes_device *, void __iomem *); 513struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
538void nes_netdev_destroy(struct net_device *); 514void nes_netdev_destroy(struct net_device *);
539int nes_nic_cm_xmit(struct sk_buff *, struct net_device *); 515int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
@@ -544,7 +520,6 @@ int nes_cm_recv(struct sk_buff *, struct net_device *);
544void nes_update_arp(unsigned char *, u32, u32, u16, u16); 520void nes_update_arp(unsigned char *, u32, u32, u16, u16);
545void nes_manage_arp_cache(struct net_device *, unsigned char *, u32, u32); 521void nes_manage_arp_cache(struct net_device *, unsigned char *, u32, u32);
546void nes_sock_release(struct nes_qp *, unsigned long *); 522void nes_sock_release(struct nes_qp *, unsigned long *);
547struct nes_cm_core *nes_cm_alloc_core(void);
548void flush_wqes(struct nes_device *nesdev, struct nes_qp *, u32, u32); 523void flush_wqes(struct nes_device *nesdev, struct nes_qp *, u32, u32);
549int nes_manage_apbvt(struct nes_vnic *, u32, u32, u32); 524int nes_manage_apbvt(struct nes_vnic *, u32, u32, u32);
550int nes_cm_disconn(struct nes_qp *); 525int nes_cm_disconn(struct nes_qp *);
@@ -556,7 +531,6 @@ int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
556struct nes_ib_device *nes_init_ofa_device(struct net_device *); 531struct nes_ib_device *nes_init_ofa_device(struct net_device *);
557void nes_destroy_ofa_device(struct nes_ib_device *); 532void nes_destroy_ofa_device(struct nes_ib_device *);
558int nes_register_ofa_device(struct nes_ib_device *); 533int nes_register_ofa_device(struct nes_ib_device *);
559void nes_unregister_ofa_device(struct nes_ib_device *);
560 534
561/* nes_util.c */ 535/* nes_util.c */
562int nes_read_eeprom_values(struct nes_device *, struct nes_adapter *); 536int nes_read_eeprom_values(struct nes_device *, struct nes_adapter *);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 0bef878e0f65..d0738623bcf3 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -80,7 +80,30 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *,
80static int add_ref_cm_node(struct nes_cm_node *); 80static int add_ref_cm_node(struct nes_cm_node *);
81static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *); 81static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *);
82static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *); 82static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *);
83 83static struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *,
84 void *, u32, void *, u32, u8);
85static struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node);
86
87static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *,
88 struct nes_vnic *,
89 struct ietf_mpa_frame *,
90 struct nes_cm_info *);
91static int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *,
92 struct nes_cm_node *);
93static int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *,
94 struct nes_cm_node *);
95static int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
96static int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *,
97 struct sk_buff *);
98static int mini_cm_dealloc_core(struct nes_cm_core *);
99static int mini_cm_get(struct nes_cm_core *);
100static int mini_cm_set(struct nes_cm_core *, u32, u32);
101static int nes_cm_disconn_true(struct nes_qp *);
102static int nes_cm_post_event(struct nes_cm_event *event);
103static int nes_disconnect(struct nes_qp *nesqp, int abrupt);
104static void nes_disconnect_worker(struct work_struct *work);
105static int send_ack(struct nes_cm_node *cm_node);
106static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb);
84 107
85/* External CM API Interface */ 108/* External CM API Interface */
86/* instance of function pointers for client API */ 109/* instance of function pointers for client API */
@@ -99,7 +122,7 @@ static struct nes_cm_ops nes_cm_api = {
99 mini_cm_set 122 mini_cm_set
100}; 123};
101 124
102struct nes_cm_core *g_cm_core; 125static struct nes_cm_core *g_cm_core;
103 126
104atomic_t cm_connects; 127atomic_t cm_connects;
105atomic_t cm_accepts; 128atomic_t cm_accepts;
@@ -149,7 +172,7 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
149/** 172/**
150 * send_mpa_request 173 * send_mpa_request
151 */ 174 */
152int send_mpa_request(struct nes_cm_node *cm_node) 175static int send_mpa_request(struct nes_cm_node *cm_node)
153{ 176{
154 struct sk_buff *skb; 177 struct sk_buff *skb;
155 int ret; 178 int ret;
@@ -243,8 +266,9 @@ static int handle_exception_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb
243 * form_cm_frame - get a free packet and build an empty frame. Use 266 * form_cm_frame - get a free packet and build an empty frame. Use
244 * node info to build. 267 * node info to build.
245 */ 268 */
246struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node, 269static struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node,
247 void *options, u32 optionsize, void *data, u32 datasize, u8 flags) 270 void *options, u32 optionsize, void *data,
271 u32 datasize, u8 flags)
248{ 272{
249 struct tcphdr *tcph; 273 struct tcphdr *tcph;
250 struct iphdr *iph; 274 struct iphdr *iph;
@@ -342,7 +366,6 @@ static void print_core(struct nes_cm_core *core)
342 if (!core) 366 if (!core)
343 return; 367 return;
344 nes_debug(NES_DBG_CM, "---------------------------------------------\n"); 368 nes_debug(NES_DBG_CM, "---------------------------------------------\n");
345 nes_debug(NES_DBG_CM, "Session ID : %u \n", atomic_read(&core->session_id));
346 369
347 nes_debug(NES_DBG_CM, "State : %u \n", core->state); 370 nes_debug(NES_DBG_CM, "State : %u \n", core->state);
348 371
@@ -395,7 +418,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
395 } 418 }
396 419
397 if (type == NES_TIMER_TYPE_SEND) { 420 if (type == NES_TIMER_TYPE_SEND) {
398 new_send->seq_num = htonl(tcp_hdr(skb)->seq); 421 new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
399 atomic_inc(&new_send->skb->users); 422 atomic_inc(&new_send->skb->users);
400 423
401 ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev); 424 ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev);
@@ -420,7 +443,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
420 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); 443 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
421 } 444 }
422 if (type == NES_TIMER_TYPE_RECV) { 445 if (type == NES_TIMER_TYPE_RECV) {
423 new_send->seq_num = htonl(tcp_hdr(skb)->seq); 446 new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
424 new_send->timetosend = jiffies; 447 new_send->timetosend = jiffies;
425 spin_lock_irqsave(&cm_node->recv_list_lock, flags); 448 spin_lock_irqsave(&cm_node->recv_list_lock, flags);
426 list_add_tail(&new_send->list, &cm_node->recv_list); 449 list_add_tail(&new_send->list, &cm_node->recv_list);
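A note on the two htonl to ntohl swaps above: both functions perform the identical byte swap on any architecture, so runtime behavior does not change; what changes is type correctness. tcp_hdr(skb)->seq is big-endian wire data (__be32), and converting it into a host-order sequence counter is ntohl()'s job, which is also what sparse's endianness checking expects:

	u32 seq = ntohl(tcp_hdr(skb)->seq);       /* __be32 (wire) -> u32 (host) */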
@@ -442,7 +465,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
442/** 465/**
443 * nes_cm_timer_tick 466 * nes_cm_timer_tick
444 */ 467 */
445void nes_cm_timer_tick(unsigned long pass) 468static void nes_cm_timer_tick(unsigned long pass)
446{ 469{
447 unsigned long flags, qplockflags; 470 unsigned long flags, qplockflags;
448 unsigned long nexttimeout = jiffies + NES_LONG_TIME; 471 unsigned long nexttimeout = jiffies + NES_LONG_TIME;
@@ -644,7 +667,7 @@ void nes_cm_timer_tick(unsigned long pass)
644/** 667/**
645 * send_syn 668 * send_syn
646 */ 669 */
647int send_syn(struct nes_cm_node *cm_node, u32 sendack) 670static int send_syn(struct nes_cm_node *cm_node, u32 sendack)
648{ 671{
649 int ret; 672 int ret;
650 int flags = SET_SYN; 673 int flags = SET_SYN;
@@ -710,7 +733,7 @@ int send_syn(struct nes_cm_node *cm_node, u32 sendack)
710/** 733/**
711 * send_reset 734 * send_reset
712 */ 735 */
713int send_reset(struct nes_cm_node *cm_node) 736static int send_reset(struct nes_cm_node *cm_node)
714{ 737{
715 int ret; 738 int ret;
716 struct sk_buff *skb = get_free_pkt(cm_node); 739 struct sk_buff *skb = get_free_pkt(cm_node);
@@ -732,7 +755,7 @@ int send_reset(struct nes_cm_node *cm_node)
732/** 755/**
733 * send_ack 756 * send_ack
734 */ 757 */
735int send_ack(struct nes_cm_node *cm_node) 758static int send_ack(struct nes_cm_node *cm_node)
736{ 759{
737 int ret; 760 int ret;
738 struct sk_buff *skb = get_free_pkt(cm_node); 761 struct sk_buff *skb = get_free_pkt(cm_node);
@@ -752,7 +775,7 @@ int send_ack(struct nes_cm_node *cm_node)
752/** 775/**
753 * send_fin 776 * send_fin
754 */ 777 */
755int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb) 778static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
756{ 779{
757 int ret; 780 int ret;
758 781
@@ -775,7 +798,7 @@ int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
775/** 798/**
776 * get_free_pkt 799 * get_free_pkt
777 */ 800 */
778struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node) 801static struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node)
779{ 802{
780 struct sk_buff *skb, *new_skb; 803 struct sk_buff *skb, *new_skb;
781 804
@@ -820,7 +843,6 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
820{ 843{
821 unsigned long flags; 844 unsigned long flags;
822 u32 hashkey; 845 u32 hashkey;
823 struct list_head *list_pos;
824 struct list_head *hte; 846 struct list_head *hte;
825 struct nes_cm_node *cm_node; 847 struct nes_cm_node *cm_node;
826 848
@@ -835,8 +857,7 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
835 857
836 /* walk list and find cm_node associated with this session ID */ 858 /* walk list and find cm_node associated with this session ID */
837 spin_lock_irqsave(&cm_core->ht_lock, flags); 859 spin_lock_irqsave(&cm_core->ht_lock, flags);
838 list_for_each(list_pos, hte) { 860 list_for_each_entry(cm_node, hte, list) {
839 cm_node = container_of(list_pos, struct nes_cm_node, list);
840 /* compare quad, return node handle if a match */ 861 /* compare quad, return node handle if a match */
841 nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n", 862 nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n",
842 cm_node->loc_addr, cm_node->loc_port, 863 cm_node->loc_addr, cm_node->loc_port,
@@ -864,13 +885,11 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
864 nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state) 885 nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state)
865{ 886{
866 unsigned long flags; 887 unsigned long flags;
867 struct list_head *listen_list;
868 struct nes_cm_listener *listen_node; 888 struct nes_cm_listener *listen_node;
869 889
870 /* walk list and find cm_node associated with this session ID */ 890 /* walk list and find cm_node associated with this session ID */
871 spin_lock_irqsave(&cm_core->listen_list_lock, flags); 891 spin_lock_irqsave(&cm_core->listen_list_lock, flags);
872 list_for_each(listen_list, &cm_core->listen_list.list) { 892 list_for_each_entry(listen_node, &cm_core->listen_list.list, list) {
873 listen_node = container_of(listen_list, struct nes_cm_listener, list);
874 /* compare node pair, return node handle if a match */ 893 /* compare node pair, return node handle if a match */
875 if (((listen_node->loc_addr == dst_addr) || 894 if (((listen_node->loc_addr == dst_addr) ||
876 listen_node->loc_addr == 0x00000000) && 895 listen_node->loc_addr == 0x00000000) &&
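Both list-walk conversions above replace the open-coded list_for_each() plus container_of() pair with list_for_each_entry(), which folds the container_of() into the iterator and lets the scratch struct list_head cursor disappear. The general form:

	struct nes_cm_node *cm_node;

	list_for_each_entry(cm_node, hte, list) {
		/* cm_node already points at the containing structure; */
		/* no separate cursor and no explicit container_of()   */
	}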
@@ -1014,7 +1033,7 @@ static void nes_addr_send_arp(u32 dst_ip)
1014 fl.nl_u.ip4_u.daddr = htonl(dst_ip); 1033 fl.nl_u.ip4_u.daddr = htonl(dst_ip);
1015 if (ip_route_output_key(&init_net, &rt, &fl)) { 1034 if (ip_route_output_key(&init_net, &rt, &fl)) {
1016 printk("%s: ip_route_output_key failed for 0x%08X\n", 1035 printk("%s: ip_route_output_key failed for 0x%08X\n",
1017 __FUNCTION__, dst_ip); 1036 __func__, dst_ip);
1018 return; 1037 return;
1019 } 1038 }
1020 1039
@@ -1077,8 +1096,6 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
 	cm_node->tcp_cntxt.rcv_nxt = 0;
 	/* get a unique session ID , add thread_id to an upcounter to handle race */
 	atomic_inc(&cm_core->node_cnt);
-	atomic_inc(&cm_core->session_id);
-	cm_node->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid);
 	cm_node->conn_type = cm_info->conn_type;
 	cm_node->apbvt_set = 0;
 	cm_node->accept_pend = 0;
@@ -1239,7 +1256,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 opti
 			continue;
 		case OPTION_NUMBER_MSS:
 			nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d Size: %d\n",
-				__FUNCTION__,
+				__func__,
 				all_options->as_mss.length, offset, optionsize);
 			got_mss_option = 1;
 			if (all_options->as_mss.length != 4) {
@@ -1272,8 +1289,8 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 opti
 /**
  * process_packet
  */
-int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
+static int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
 		struct nes_cm_core *cm_core)
 {
 	int optionsize;
 	int datasize;
@@ -1360,7 +1377,7 @@ int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
 	if (optionsize) {
 		u8 *optionsloc = (u8 *)&tcph[1];
 		if (process_options(cm_node, optionsloc, optionsize, (u32)tcph->syn)) {
-			nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", __FUNCTION__, cm_node);
+			nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", __func__, cm_node);
 			send_reset(cm_node);
 			if (cm_node->state != NES_CM_STATE_SYN_SENT)
 				rem_ref_cm_node(cm_core, cm_node);
@@ -1605,9 +1622,7 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
 	listener->cm_core = cm_core;
 	listener->nesvnic = nesvnic;
 	atomic_inc(&cm_core->node_cnt);
-	atomic_inc(&cm_core->session_id);
 
-	listener->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid);
 	listener->conn_type = cm_info->conn_type;
 	listener->backlog = cm_info->backlog;
 	listener->listener_state = NES_CM_LISTENER_ACTIVE_STATE;
@@ -1631,9 +1646,10 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
 /**
  * mini_cm_connect - make a connection node with params
  */
-struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
-		struct nes_vnic *nesvnic, struct ietf_mpa_frame *mpa_frame,
-		struct nes_cm_info *cm_info)
+static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
+		struct nes_vnic *nesvnic,
+		struct ietf_mpa_frame *mpa_frame,
+		struct nes_cm_info *cm_info)
 {
 	int ret = 0;
 	struct nes_cm_node *cm_node;
@@ -1717,8 +1733,8 @@ struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
  * mini_cm_accept - accept a connection
  * This function is never called
  */
-int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame,
+static int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame,
 		struct nes_cm_node *cm_node)
 {
 	return 0;
 }
@@ -1727,9 +1743,9 @@ int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame
 /**
  * mini_cm_reject - reject and teardown a connection
  */
-int mini_cm_reject(struct nes_cm_core *cm_core,
+static int mini_cm_reject(struct nes_cm_core *cm_core,
 		struct ietf_mpa_frame *mpa_frame,
 		struct nes_cm_node *cm_node)
 {
 	int ret = 0;
 	struct sk_buff *skb;
@@ -1761,7 +1777,7 @@ int mini_cm_reject(struct nes_cm_core *cm_core,
 /**
  * mini_cm_close
  */
-int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
+static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
 {
 	int ret = 0;
 
@@ -1808,8 +1824,8 @@ int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
  * recv_pkt - recv an ETHERNET packet, and process it through CM
  * node state machine
  */
-int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic,
+static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic,
 		struct sk_buff *skb)
 {
 	struct nes_cm_node *cm_node = NULL;
 	struct nes_cm_listener *listener = NULL;
@@ -1898,7 +1914,7 @@ int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic,
 /**
  * nes_cm_alloc_core - allocate a top level instance of a cm core
  */
-struct nes_cm_core *nes_cm_alloc_core(void)
+static struct nes_cm_core *nes_cm_alloc_core(void)
 {
 	int i;
 
@@ -1919,7 +1935,6 @@ struct nes_cm_core *nes_cm_alloc_core(void)
 	cm_core->state = NES_CM_STATE_INITED;
 	cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS;
 
-	atomic_set(&cm_core->session_id, 0);
 	atomic_set(&cm_core->events_posted, 0);
 
 	/* init the packet lists */
@@ -1958,7 +1973,7 @@ struct nes_cm_core *nes_cm_alloc_core(void)
 /**
  * mini_cm_dealloc_core - deallocate a top level instance of a cm core
  */
-int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
+static int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
 {
 	nes_debug(NES_DBG_CM, "De-Alloc CM Core (%p)\n", cm_core);
 
@@ -1983,7 +1998,7 @@ int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
 /**
  * mini_cm_get
  */
-int mini_cm_get(struct nes_cm_core *cm_core)
+static int mini_cm_get(struct nes_cm_core *cm_core)
 {
 	return cm_core->state;
 }
@@ -1992,7 +2007,7 @@ int mini_cm_get(struct nes_cm_core *cm_core)
 /**
  * mini_cm_set
  */
-int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value)
+static int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value)
 {
 	int ret = 0;
 
@@ -2109,7 +2124,7 @@ int nes_cm_disconn(struct nes_qp *nesqp)
 /**
  * nes_disconnect_worker
  */
-void nes_disconnect_worker(struct work_struct *work)
+static void nes_disconnect_worker(struct work_struct *work)
 {
 	struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work);
 
@@ -2122,7 +2137,7 @@ void nes_disconnect_worker(struct work_struct *work)
 /**
  * nes_cm_disconn_true
  */
-int nes_cm_disconn_true(struct nes_qp *nesqp)
+static int nes_cm_disconn_true(struct nes_qp *nesqp)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -2265,7 +2280,7 @@ int nes_cm_disconn_true(struct nes_qp *nesqp)
 /**
  * nes_disconnect
  */
-int nes_disconnect(struct nes_qp *nesqp, int abrupt)
+static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
 {
 	int ret = 0;
 	struct nes_vnic *nesvnic;
@@ -2482,7 +2497,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	}
 	if (ret)
 		printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
-				__FUNCTION__, __LINE__, ret);
+				__func__, __LINE__, ret);
 
 	return 0;
 }
@@ -2650,7 +2665,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
 	cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);
 	if (!cm_node) {
 		printk("%s[%u] Error returned from listen API call\n",
-				__FUNCTION__, __LINE__);
+				__func__, __LINE__);
 		return -ENOMEM;
 	}
 
@@ -2740,7 +2755,7 @@ int nes_cm_stop(void)
 * cm_event_connected
 * handle a connected event, setup QPs and HW
 */
-void cm_event_connected(struct nes_cm_event *event)
+static void cm_event_connected(struct nes_cm_event *event)
 {
 	u64 u64temp;
 	struct nes_qp *nesqp;
@@ -2864,7 +2879,7 @@ void cm_event_connected(struct nes_cm_event *event)
 
 	if (ret)
 		printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
-				__FUNCTION__, __LINE__, ret);
+				__func__, __LINE__, ret);
 	nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = %lu\n",
 			nesqp->hwqp.qp_id, jiffies );
 
@@ -2877,7 +2892,7 @@ void cm_event_connected(struct nes_cm_event *event)
 /**
  * cm_event_connect_error
  */
-void cm_event_connect_error(struct nes_cm_event *event)
+static void cm_event_connect_error(struct nes_cm_event *event)
 {
 	struct nes_qp *nesqp;
 	struct iw_cm_id *cm_id;
@@ -2919,7 +2934,7 @@ void cm_event_connect_error(struct nes_cm_event *event)
 	nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
 	if (ret)
 		printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
-				__FUNCTION__, __LINE__, ret);
+				__func__, __LINE__, ret);
 	nes_rem_ref(&nesqp->ibqp);
 	cm_id->rem_ref(cm_id);
 
@@ -2930,7 +2945,7 @@ void cm_event_connect_error(struct nes_cm_event *event)
 /**
  * cm_event_reset
  */
-void cm_event_reset(struct nes_cm_event *event)
+static void cm_event_reset(struct nes_cm_event *event)
 {
 	struct nes_qp *nesqp;
 	struct iw_cm_id *cm_id;
@@ -2973,7 +2988,7 @@ void cm_event_reset(struct nes_cm_event *event)
 /**
  * cm_event_mpa_req
  */
-void cm_event_mpa_req(struct nes_cm_event *event)
+static void cm_event_mpa_req(struct nes_cm_event *event)
 {
 	struct iw_cm_id *cm_id;
 	struct iw_cm_event cm_event;
@@ -3007,7 +3022,7 @@ void cm_event_mpa_req(struct nes_cm_event *event)
 	ret = cm_id->event_handler(cm_id, &cm_event);
 	if (ret)
 		printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
-				__FUNCTION__, __LINE__, ret);
+				__func__, __LINE__, ret);
 
 	return;
 }
@@ -3019,7 +3034,7 @@ static void nes_cm_event_handler(struct work_struct *);
 * nes_cm_post_event
 * post an event to the cm event handler
 */
-int nes_cm_post_event(struct nes_cm_event *event)
+static int nes_cm_post_event(struct nes_cm_event *event)
 {
 	atomic_inc(&event->cm_node->cm_core->events_posted);
 	add_ref_cm_node(event->cm_node);
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index a59f0a7fb278..7717cb2ab500 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -225,7 +225,6 @@ enum nes_cm_listener_state {
 
 struct nes_cm_listener {
 	struct list_head list;
-	u64 session_id;
 	struct nes_cm_core *cm_core;
 	u8 loc_mac[ETH_ALEN];
 	nes_addr_t loc_addr;
@@ -242,7 +241,6 @@ struct nes_cm_listener {
 
 /* per connection node and node state information */
 struct nes_cm_node {
-	u64 session_id;
 	u32 hashkey;
 
 	nes_addr_t loc_addr, rem_addr;
@@ -327,7 +325,6 @@ struct nes_cm_event {
 
 struct nes_cm_core {
 	enum nes_cm_node_state state;
-	atomic_t session_id;
 
 	atomic_t listen_node_cnt;
 	struct nes_cm_node listen_list;
@@ -383,35 +380,10 @@ struct nes_cm_ops {
 	int (*set)(struct nes_cm_core *, u32, u32);
 };
 
-
-int send_mpa_request(struct nes_cm_node *);
-struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *,
-		void *, u32, void *, u32, u8);
 int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
 		enum nes_timer_type, int, int);
-void nes_cm_timer_tick(unsigned long);
-int send_syn(struct nes_cm_node *, u32);
-int send_reset(struct nes_cm_node *);
-int send_ack(struct nes_cm_node *);
-int send_fin(struct nes_cm_node *, struct sk_buff *);
-struct sk_buff *get_free_pkt(struct nes_cm_node *);
-int process_packet(struct nes_cm_node *, struct sk_buff *, struct nes_cm_core *);
-
-struct nes_cm_node * mini_cm_connect(struct nes_cm_core *,
-		struct nes_vnic *, struct ietf_mpa_frame *, struct nes_cm_info *);
-int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *);
-int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *);
-int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
-int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, struct sk_buff *);
-struct nes_cm_core *mini_cm_alloc_core(struct nes_cm_info *);
-int mini_cm_dealloc_core(struct nes_cm_core *);
-int mini_cm_get(struct nes_cm_core *);
-int mini_cm_set(struct nes_cm_core *, u32, u32);
 
 int nes_cm_disconn(struct nes_qp *);
-void nes_disconnect_worker(struct work_struct *);
-int nes_cm_disconn_true(struct nes_qp *);
-int nes_disconnect(struct nes_qp *, int);
 
 int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
 int nes_reject(struct iw_cm_id *, const void *, u8);
@@ -423,11 +395,4 @@ int nes_cm_recv(struct sk_buff *, struct net_device *);
 int nes_cm_start(void);
 int nes_cm_stop(void);
 
-/* CM event handler functions */
-void cm_event_connected(struct nes_cm_event *);
-void cm_event_connect_error(struct nes_cm_event *);
-void cm_event_reset(struct nes_cm_event *);
-void cm_event_mpa_req(struct nes_cm_event *);
-int nes_cm_post_event(struct nes_cm_event *);
-
 #endif /* NES_CM_H */
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 49e53e4c1ebe..aa53aab91bf8 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -41,7 +41,7 @@
 
 #include "nes.h"
 
-u32 crit_err_count = 0;
+static u32 crit_err_count;
 u32 int_mod_timer_init;
 u32 int_mod_cq_depth_256;
 u32 int_mod_cq_depth_128;
@@ -53,6 +53,17 @@ u32 int_mod_cq_depth_1;
 
 #include "nes_cm.h"
 
+static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq);
+static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count);
+static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
+		u8 OneG_Mode);
+static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq);
+static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq);
+static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq);
+static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
+		struct nes_hw_aeqe *aeqe);
+static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
+static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
 
 #ifdef CONFIG_INFINIBAND_NES_DEBUG
 static unsigned char *nes_iwarp_state_str[] = {
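The block of forward declarations above lets these handlers gain internal linkage without reordering their definitions: each function is defined after its first use in the file, so the static prototypes at the top satisfy the compiler, and the now-unneeded extern declarations drop out of the headers. A generic sketch of the pattern (names illustrative):

/* forward declaration: callers may appear before the definition */
static int helper(int x);

int api_entry(int x)
{
	return helper(x) + 1;		/* sole user of helper() */
}

/* static gives internal linkage; no header declaration required */
static int helper(int x)
{
	return x * 2;
}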
@@ -370,7 +381,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 		nesadapter->et_use_adaptive_rx_coalesce = 1;
 		nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
 		nesadapter->et_rx_coalesce_usecs_irq = 0;
-		printk(PFX "%s: Using Adaptive Interrupt Moderation\n", __FUNCTION__);
+		printk(PFX "%s: Using Adaptive Interrupt Moderation\n", __func__);
 	}
 	/* Setup and enable the periodic timer */
 	if (nesadapter->et_rx_coalesce_usecs_irq)
@@ -382,7 +393,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 	nesadapter->base_pd = 1;
 
 	nesadapter->device_cap_flags =
-		IB_DEVICE_ZERO_STAG | IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW;
+		IB_DEVICE_ZERO_STAG | IB_DEVICE_MEM_WINDOW;
 
 	nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter)
 		[(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]);
@@ -572,7 +583,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 		if (vendor_id == 0xffff)
 			break;
 	}
-	nes_debug(NES_DBG_INIT, "%s %d functions found for %s.\n", __FUNCTION__,
+	nes_debug(NES_DBG_INIT, "%s %d functions found for %s.\n", __func__,
 		func_index, pci_name(nesdev->pcidev));
 	nesadapter->adapter_fcn_count = func_index;
 
@@ -583,7 +594,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 /**
  * nes_reset_adapter_ne020
  */
-unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode)
+static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode)
 {
 	u32 port_count;
 	u32 u32temp;
@@ -691,7 +702,8 @@ unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode)
 /**
  * nes_init_serdes
  */
-int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, u8 OneG_Mode)
+static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
+		u8 OneG_Mode)
 {
 	int i;
 	u32 u32temp;
@@ -739,7 +751,7 @@ int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, u8 One
 			& 0x0000000f)) != 0x0000000f) && (i++ < 5000))
 		mdelay(1);
 	if (i >= 5000) {
-		printk("%s: Init: serdes 1 not ready, status=%x\n", __FUNCTION__, u32temp);
+		printk("%s: Init: serdes 1 not ready, status=%x\n", __func__, u32temp);
 		/* return 1; */
 	}
 	nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x000bdef7);
@@ -760,7 +772,7 @@ int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, u8 One
 * nes_init_csr_ne020
 * Initialize registers for ne020 hardware
 */
-void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count)
+static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count)
 {
 	u32 u32temp;
 
@@ -1204,7 +1216,7 @@ int nes_init_phy(struct nes_device *nesdev)
 	if (nesadapter->OneG_Mode) {
 		nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index);
 		if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) {
-			printk(PFX "%s: Programming mdc config for 1G\n", __FUNCTION__);
+			printk(PFX "%s: Programming mdc config for 1G\n", __func__);
 			tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
 			tx_config |= 0x04;
 			nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
@@ -1358,7 +1370,7 @@ static void nes_replenish_nic_rq(struct nes_vnic *nesvnic)
 static void nes_rq_wqes_timeout(unsigned long parm)
 {
 	struct nes_vnic *nesvnic = (struct nes_vnic *)parm;
-	printk("%s: Timer fired.\n", __FUNCTION__);
+	printk("%s: Timer fired.\n", __func__);
 	atomic_set(&nesvnic->rx_skb_timer_running, 0);
 	if (atomic_read(&nesvnic->rx_skbs_needed))
 		nes_replenish_nic_rq(nesvnic);
@@ -1909,7 +1921,7 @@ void nes_dpc(unsigned long param)
 /**
  * nes_process_ceq
  */
-void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
+static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
 {
 	u64 u64temp;
 	struct nes_hw_cq *cq;
@@ -1949,7 +1961,7 @@ void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
 /**
  * nes_process_aeq
  */
-void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
+static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
 {
 //	u64 u64temp;
 	u32 head;
@@ -2060,7 +2072,7 @@ static void nes_reset_link(struct nes_device *nesdev, u32 mac_index)
 /**
  * nes_process_mac_intr
  */
-void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
+static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 {
 	unsigned long flags;
 	u32 pcs_control_status;
@@ -2163,7 +2175,7 @@ void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 			temp_phy_data = phy_data;
 		} while (1);
 		nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
-			__FUNCTION__, phy_data, nesadapter->mac_link_down ? "DOWN" : "UP");
+			__func__, phy_data, nesadapter->mac_link_down ? "DOWN" : "UP");
 
 	} else {
 		phy_data = (0x0f0f0000 == (pcs_control_status & 0x0f1f0000)) ? 4 : 0;
@@ -2205,7 +2217,7 @@ void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 
 
 
-void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
+static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
 {
 	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
 
@@ -2428,7 +2440,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
 /**
  * nes_cqp_ce_handler
  */
-void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
+static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
 {
 	u64 u64temp;
 	unsigned long flags;
@@ -2567,7 +2579,8 @@ void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
 /**
  * nes_process_iwarp_aeqe
  */
-void nes_process_iwarp_aeqe(struct nes_device *nesdev, struct nes_hw_aeqe *aeqe)
+static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
+		struct nes_hw_aeqe *aeqe)
 {
 	u64 context;
 	u64 aeqe_context = 0;
@@ -2819,7 +2832,7 @@ void nes_process_iwarp_aeqe(struct nes_device *nesdev, struct nes_hw_aeqe *aeqe)
 			le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
 		if (resource_allocated) {
 			printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n",
-				__FUNCTION__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
+				__func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
 		}
 		break;
 	case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index eee77da61935..34166641f207 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -802,7 +802,7 @@ static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
 
 	memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
 	printk(PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n",
-			__FUNCTION__, netdev->addr_len,
+			__func__, netdev->addr_len,
 			mac_addr->sa_data[0], mac_addr->sa_data[1],
 			mac_addr->sa_data[2], mac_addr->sa_data[3],
 			mac_addr->sa_data[4], mac_addr->sa_data[5]);
@@ -832,7 +832,7 @@ static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
 /**
  * nes_netdev_set_multicast_list
  */
-void nes_netdev_set_multicast_list(struct net_device *netdev)
+static void nes_netdev_set_multicast_list(struct net_device *netdev)
 {
 	struct nes_vnic *nesvnic = netdev_priv(netdev);
 	struct nes_device *nesdev = nesvnic->nesdev;
@@ -947,28 +947,6 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
 	return ret;
 }
 
-
-/**
- * nes_netdev_exit - destroy network device
- */
-void nes_netdev_exit(struct nes_vnic *nesvnic)
-{
-	struct net_device *netdev = nesvnic->netdev;
-	struct nes_ib_device *nesibdev = nesvnic->nesibdev;
-
-	nes_debug(NES_DBG_SHUTDOWN, "\n");
-
-	// destroy the ibdevice if RDMA enabled
-	if ((nesvnic->rdma_enabled)&&(nesvnic->of_device_registered)) {
-		nes_destroy_ofa_device( nesibdev );
-		nesvnic->of_device_registered = 0;
-		nesvnic->nesibdev = NULL;
-	}
-	unregister_netdev(netdev);
-	nes_debug(NES_DBG_SHUTDOWN, "\n");
-}
-
-
 #define NES_ETHTOOL_STAT_COUNT 55
 static const char nes_ethtool_stringset[NES_ETHTOOL_STAT_COUNT][ETH_GSTRING_LEN] = {
 	"Link Change Interrupts",
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index c4ec6ac63461..f9db07c2717d 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -566,7 +566,7 @@ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
 				cqp_request);
 	} else
 		printk(KERN_ERR PFX "%s: Could not allocated a CQP request.\n",
-			__FUNCTION__);
+			__func__);
 
 	return cqp_request;
 }
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index a651e9d9f0ef..7c27420c2240 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -49,6 +49,7 @@ atomic_t mod_qp_timouts;
 atomic_t qps_created;
 atomic_t sw_qps_destroyed;
 
+static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
 
 /**
  * nes_alloc_mw
@@ -1043,10 +1044,10 @@ static int nes_setup_virt_qp(struct nes_qp *nesqp, struct nes_pbl *nespbl,
 	u8 sq_pbl_entries;
 
 	pbl_entries = nespbl->pbl_size >> 3;
-	nes_debug(NES_DBG_QP, "Userspace PBL, pbl_size=%u, pbl_entries = %d pbl_vbase=%p, pbl_pbase=%p\n",
+	nes_debug(NES_DBG_QP, "Userspace PBL, pbl_size=%u, pbl_entries = %d pbl_vbase=%p, pbl_pbase=%lx\n",
 			nespbl->pbl_size, pbl_entries,
 			(void *)nespbl->pbl_vbase,
-			(void *)nespbl->pbl_pbase);
+			(unsigned long) nespbl->pbl_pbase);
 	pbl = (__le64 *) nespbl->pbl_vbase; /* points to first pbl entry */
 	/* now lets set the sq_vbase as well as rq_vbase addrs we will assign */
 	/* the first pbl to be fro the rq_vbase... */
@@ -1074,9 +1075,9 @@ static int nes_setup_virt_qp(struct nes_qp *nesqp, struct nes_pbl *nespbl,
 	/* nesqp->hwqp.rq_vbase = bus_to_virt(*pbl); */
 	/*nesqp->hwqp.rq_vbase = phys_to_virt(*pbl); */
 
-	nes_debug(NES_DBG_QP, "QP sq_vbase= %p sq_pbase=%p rq_vbase=%p rq_pbase=%p\n",
-			nesqp->hwqp.sq_vbase, (void *)nesqp->hwqp.sq_pbase,
-			nesqp->hwqp.rq_vbase, (void *)nesqp->hwqp.rq_pbase);
+	nes_debug(NES_DBG_QP, "QP sq_vbase= %p sq_pbase=%lx rq_vbase=%p rq_pbase=%lx\n",
+			nesqp->hwqp.sq_vbase, (unsigned long) nesqp->hwqp.sq_pbase,
+			nesqp->hwqp.rq_vbase, (unsigned long) nesqp->hwqp.rq_pbase);
 	spin_lock_irqsave(&nesadapter->pbl_lock, flags);
 	if (!nesadapter->free_256pbl) {
 		pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
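dma_addr_t is not a pointer type and may be wider or narrower than void *, so feeding it to a %p conversion triggers printk format warnings; the hunks above switch such bus addresses to %lx with an explicit unsigned long cast, keeping %p only for true kernel pointers. A minimal sketch of the convention (names illustrative):

#include <linux/types.h>
#include <linux/kernel.h>

static void show_mapping(void *vaddr, dma_addr_t pbase)
{
	/* %p for real pointers, %lx plus a cast for bus/DMA addresses */
	pr_debug("vbase=%p pbase=%lx\n", vaddr, (unsigned long) pbase);
}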
@@ -1251,6 +1252,9 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
 	u8 rq_encoded_size;
 	/* int counter; */
 
+	if (init_attr->create_flags)
+		return ERR_PTR(-EINVAL);
+
 	atomic_inc(&qps_created);
 	switch (init_attr->qp_type) {
 	case IB_QPT_RC:
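struct ib_qp_init_attr grew a create_flags field in this series (used below by IPoIB to request LSO), so a driver that supports none of the flags must now fail any request that sets one rather than silently ignore it; because nes_create_qp() returns a pointer, the errno travels back via ERR_PTR(). A generic sketch of that convention (names illustrative):

#include <linux/err.h>
#include <linux/slab.h>

struct widget { int id; };

static struct widget *widget_create(unsigned int flags)
{
	struct widget *w;

	if (flags)				/* no creation flags supported */
		return ERR_PTR(-EINVAL);

	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w)
		return ERR_PTR(-ENOMEM);

	return w;	/* caller distinguishes with IS_ERR()/PTR_ERR() */
}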
@@ -1908,13 +1912,13 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
 			nesadapter->free_256pbl++;
 			if (nesadapter->free_256pbl > nesadapter->max_256pbl) {
 				printk(KERN_ERR PFX "%s: free 256B PBLs(%u) has exceeded the max(%u)\n",
-						__FUNCTION__, nesadapter->free_256pbl, nesadapter->max_256pbl);
+						__func__, nesadapter->free_256pbl, nesadapter->max_256pbl);
 			}
 		} else if (nescq->virtual_cq == 2) {
 			nesadapter->free_4kpbl++;
 			if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) {
 				printk(KERN_ERR PFX "%s: free 4K PBLs(%u) has exceeded the max(%u)\n",
-						__FUNCTION__, nesadapter->free_4kpbl, nesadapter->max_4kpbl);
+						__func__, nesadapter->free_4kpbl, nesadapter->max_4kpbl);
 			}
 			opcode |= NES_CQP_CQ_4KB_CHUNK;
 		}
@@ -2653,10 +2657,10 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	nespbl->pbl_vbase = (u64 *)pbl;
 	nespbl->user_base = start;
-	nes_debug(NES_DBG_MR, "Allocated PBL memory, %u bytes, pbl_pbase=%p,"
+	nes_debug(NES_DBG_MR, "Allocated PBL memory, %u bytes, pbl_pbase=%lx,"
 			" pbl_vbase=%p user_base=0x%lx\n",
-			nespbl->pbl_size, (void *)nespbl->pbl_pbase,
-			(void*)nespbl->pbl_vbase, nespbl->user_base);
+			nespbl->pbl_size, (unsigned long) nespbl->pbl_pbase,
+			(void *) nespbl->pbl_vbase, nespbl->user_base);
 
 	list_for_each_entry(chunk, &region->chunk_list, list) {
 		for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
@@ -3895,14 +3899,11 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
 /**
  * nes_unregister_ofa_device
  */
-void nes_unregister_ofa_device(struct nes_ib_device *nesibdev)
+static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev)
 {
 	struct nes_vnic *nesvnic = nesibdev->nesvnic;
 	int i;
 
-	if (nesibdev == NULL)
-		return;
-
 	for (i = 0; i < ARRAY_SIZE(nes_class_attributes); ++i) {
 		class_device_remove_file(&nesibdev->ibdev.class_dev, nes_class_attributes[i]);
 	}
diff --git a/drivers/infiniband/ulp/ipoib/Makefile b/drivers/infiniband/ulp/ipoib/Makefile
index 98ee38e8c2c4..3090100f0de7 100644
--- a/drivers/infiniband/ulp/ipoib/Makefile
+++ b/drivers/infiniband/ulp/ipoib/Makefile
@@ -4,7 +4,8 @@ ib_ipoib-y := ipoib_main.o \
 					   ipoib_ib.o \
 					   ipoib_multicast.o \
 					   ipoib_verbs.o \
-					   ipoib_vlan.o
+					   ipoib_vlan.o \
+					   ipoib_ethtool.o
 ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_CM)		+= ipoib_cm.o
 ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_DEBUG)	+= ipoib_fs.o
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 054fab8e27a0..73b2b176ad0e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -87,6 +87,7 @@ enum {
 	IPOIB_MCAST_STARTED	  = 8,
 	IPOIB_FLAG_ADMIN_CM	  = 9,
 	IPOIB_FLAG_UMCAST	  = 10,
+	IPOIB_FLAG_CSUM		  = 11,
 
 	IPOIB_MAX_BACKOFF_SECONDS = 16,
 
@@ -241,6 +242,11 @@ struct ipoib_cm_dev_priv {
 	int			num_frags;
 };
 
+struct ipoib_ethtool_st {
+	u16	coalesce_usecs;
+	u16	max_coalesced_frames;
+};
+
 /*
  * Device private locking: tx_lock protects members used in TX fast
  * path (and we use LLTX so upper layers don't do extra locking).
@@ -318,6 +324,8 @@ struct ipoib_dev_priv {
 	struct dentry *mcg_dentry;
 	struct dentry *path_dentry;
 #endif
+	int	hca_caps;
+	struct ipoib_ethtool_st ethtool;
 };
 
 struct ipoib_ah {
@@ -458,6 +466,8 @@ void ipoib_pkey_poll(struct work_struct *work);
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
 void ipoib_drain_cq(struct net_device *dev);
 
+void ipoib_set_ethtool_ops(struct net_device *dev);
+
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 
 #define IPOIB_FLAGS_RC		0x80
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 2490b2d79dbb..9db7b0bd9134 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1007,9 +1007,9 @@ static int ipoib_cm_modify_tx_init(struct net_device *dev,
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ib_qp_attr qp_attr;
 	int qp_attr_mask, ret;
-	ret = ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
+	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
 	if (ret) {
-		ipoib_warn(priv, "pkey 0x%x not in cache: %d\n", priv->pkey, ret);
+		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
 		return ret;
 	}
 
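ib_find_cached_pkey() answers from the kernel's P_Key cache, which can lag behind changes made by the subnet manager; ib_find_pkey() reads the port's table through the device instead. The two calls share a signature, so the swap is mechanical. A sketch of the call (wrapper name illustrative):

#include <rdma/ib_verbs.h>

/* resolve the table index of @pkey on @port, bypassing the cache;
 * returns 0 and fills *index on success */
static int resolve_pkey_index(struct ib_device *ca, u8 port,
			      u16 pkey, u16 *index)
{
	return ib_find_pkey(ca, port, pkey, index);
}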
@@ -1383,6 +1383,10 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
 		ipoib_warn(priv, "enabling connected mode "
 			"will cause multicast packet drops\n");
+
+		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
+
 		ipoib_flush_paths(dev);
 		return count;
 	}
@@ -1391,6 +1395,13 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
 		dev->mtu = min(priv->mcast_mtu, dev->mtu);
 		ipoib_flush_paths(dev);
+
+		if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
+			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+			if (priv->hca_caps & IB_DEVICE_UD_TSO)
+				dev->features |= NETIF_F_TSO;
+		}
+
 		return count;
 	}
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
new file mode 100644
index 000000000000..9a47428366c9
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "ipoib.h"
+
+static void ipoib_get_drvinfo(struct net_device *netdev,
+			      struct ethtool_drvinfo *drvinfo)
+{
+	strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1);
+}
+
+static int ipoib_get_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+	coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs;
+	coal->tx_coalesce_usecs = priv->ethtool.coalesce_usecs;
+	coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
+	coal->tx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
+
+	return 0;
+}
+
+static int ipoib_set_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	int ret;
+
+	/*
+	 * Since IPoIB uses a single CQ for both rx and tx, we assume
+	 * that rx params dictate the configuration.  These values are
+	 * saved in the private data and returned when ipoib_get_coalesce()
+	 * is called.
+	 */
+	if (coal->rx_coalesce_usecs > 0xffff ||
+	    coal->rx_max_coalesced_frames > 0xffff)
+		return -EINVAL;
+
+	ret = ib_modify_cq(priv->cq, coal->rx_max_coalesced_frames,
+			   coal->rx_coalesce_usecs);
+	if (ret && ret != -ENOSYS) {
+		ipoib_warn(priv, "failed modifying CQ (%d)\n", ret);
+		return ret;
+	}
+
+	coal->tx_coalesce_usecs = coal->rx_coalesce_usecs;
+	coal->tx_max_coalesced_frames = coal->rx_max_coalesced_frames;
+	priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs;
+	priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames;
+
+	return 0;
+}
+
+static const struct ethtool_ops ipoib_ethtool_ops = {
+	.get_drvinfo		= ipoib_get_drvinfo,
+	.get_tso		= ethtool_op_get_tso,
+	.get_coalesce		= ipoib_get_coalesce,
+	.set_coalesce		= ipoib_set_coalesce,
+};
+
+void ipoib_set_ethtool_ops(struct net_device *dev)
+{
+	SET_ETHTOOL_OPS(dev, &ipoib_ethtool_ops);
+}
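Because IPoIB posts RX and TX completions to a single CQ, set_coalesce adopts the RX parameters for both directions and mirrors them into the TX fields, so a subsequent get reports one consistent view. An -ENOSYS return from ib_modify_cq() is deliberately tolerated: hardware without CQ moderation still records the settings rather than failing the call. Once wired in via ipoib_set_ethtool_ops() below, moderation can be tuned from userspace with, for example, ethtool -C ib0 rx-usecs 10 rx-frames 16 (interface name and values illustrative).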
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 08c4396cf418..0205eb7c1bd3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -39,6 +39,8 @@
 #include <linux/dma-mapping.h>
 
 #include <rdma/ib_cache.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
 
 #include "ipoib.h"
 
@@ -231,6 +233,10 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	skb->dev = dev;
 	/* XXX get correct PACKET_ type here */
 	skb->pkt_type = PACKET_HOST;
+
+	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
 	netif_receive_skb(skb);
 
 repost:
@@ -245,29 +251,37 @@ static int ipoib_dma_map_tx(struct ib_device *ca,
 	struct sk_buff *skb = tx_req->skb;
 	u64 *mapping = tx_req->mapping;
 	int i;
+	int off;
 
-	mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
-			DMA_TO_DEVICE);
-	if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
-		return -EIO;
+	if (skb_headlen(skb)) {
+		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
+				DMA_TO_DEVICE);
+		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
+			return -EIO;
+
+		off = 1;
+	} else
+		off = 0;
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		mapping[i + 1] = ib_dma_map_page(ca, frag->page,
+		mapping[i + off] = ib_dma_map_page(ca, frag->page,
 				frag->page_offset, frag->size,
 				DMA_TO_DEVICE);
-		if (unlikely(ib_dma_mapping_error(ca, mapping[i + 1])))
+		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
 	}
 	return 0;
 
 partial_error:
-	ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
-
 	for (; i > 0; --i) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
-		ib_dma_unmap_page(ca, mapping[i], frag->size, DMA_TO_DEVICE);
+		ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE);
 	}
+
+	if (off)
+		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+
 	return -EIO;
 }
 
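The head/fragment split here anticipates the LSO transmit path added below: ipoib_send() pulls the TCP/IP headers out of the linear area before mapping, so a GSO skb can reach this function with skb_headlen() == 0, in which case mapping slot 0 must not be consumed. With a head present, fragment i occupies mapping[i + 1] (off = 1); without one, mapping[i] (off = 0). The unwind loop unmaps fragment i - 1, which lives at mapping[(i - 1) + off] = mapping[i - !off], keeping the error path aligned with whichever layout was actually used.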
@@ -277,12 +291,17 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
 	struct sk_buff *skb = tx_req->skb;
 	u64 *mapping = tx_req->mapping;
 	int i;
+	int off;
 
-	ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+	if (skb_headlen(skb)) {
+		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+		off = 1;
+	} else
+		off = 0;
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		ib_dma_unmap_page(ca, mapping[i + 1], frag->size,
+		ib_dma_unmap_page(ca, mapping[i + off], frag->size,
 				DMA_TO_DEVICE);
 	}
 }
@@ -388,24 +407,40 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
 static inline int post_send(struct ipoib_dev_priv *priv,
 			    unsigned int wr_id,
 			    struct ib_ah *address, u32 qpn,
-			    u64 *mapping, int headlen,
-			    skb_frag_t *frags,
-			    int nr_frags)
+			    struct ipoib_tx_buf *tx_req,
+			    void *head, int hlen)
 {
 	struct ib_send_wr *bad_wr;
-	int i;
+	int i, off;
+	struct sk_buff *skb = tx_req->skb;
+	skb_frag_t *frags = skb_shinfo(skb)->frags;
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	u64 *mapping = tx_req->mapping;
+
+	if (skb_headlen(skb)) {
+		priv->tx_sge[0].addr = mapping[0];
+		priv->tx_sge[0].length = skb_headlen(skb);
+		off = 1;
+	} else
+		off = 0;
 
-	priv->tx_sge[0].addr = mapping[0];
-	priv->tx_sge[0].length = headlen;
 	for (i = 0; i < nr_frags; ++i) {
-		priv->tx_sge[i + 1].addr = mapping[i + 1];
-		priv->tx_sge[i + 1].length = frags[i].size;
+		priv->tx_sge[i + off].addr = mapping[i + off];
+		priv->tx_sge[i + off].length = frags[i].size;
 	}
-	priv->tx_wr.num_sge = nr_frags + 1;
+	priv->tx_wr.num_sge = nr_frags + off;
 	priv->tx_wr.wr_id = wr_id;
 	priv->tx_wr.wr.ud.remote_qpn = qpn;
 	priv->tx_wr.wr.ud.ah = address;
 
+	if (head) {
+		priv->tx_wr.wr.ud.mss = skb_shinfo(skb)->gso_size;
+		priv->tx_wr.wr.ud.header = head;
+		priv->tx_wr.wr.ud.hlen = hlen;
+		priv->tx_wr.opcode = IB_WR_LSO;
+	} else
+		priv->tx_wr.opcode = IB_WR_SEND;
+
 	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
 }
 
@@ -414,14 +449,30 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_tx_buf *tx_req;
-
-	if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
-		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
-			   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
-		++dev->stats.tx_dropped;
-		++dev->stats.tx_errors;
-		ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
-		return;
+	int hlen;
+	void *phead;
+
+	if (skb_is_gso(skb)) {
+		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		phead = skb->data;
+		if (unlikely(!skb_pull(skb, hlen))) {
+			ipoib_warn(priv, "linear data too small\n");
+			++dev->stats.tx_dropped;
+			++dev->stats.tx_errors;
+			dev_kfree_skb_any(skb);
+			return;
+		}
+	} else {
+		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
+			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
+				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
+			++dev->stats.tx_dropped;
+			++dev->stats.tx_errors;
+			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
+			return;
+		}
+		phead = NULL;
+		hlen = 0;
 	}
 
 	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
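On the GSO branch the HCA, not the stack, segments the payload: the combined IP and TCP header length is skb_transport_offset() + tcp_hdrlen(), phead records where those headers start, and skb_pull() removes them from the linear data since they travel separately in the work request. This is also why post_send() above switches to the IB_WR_LSO opcode carrying the header pointer, header length, and gso_size as the MSS, while non-GSO traffic keeps the plain IB_WR_SEND path and the old length check.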
@@ -442,10 +493,13 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		return;
 	}
 
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
+	else
+		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
+
 	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
-			       address->ah, qpn,
-			       tx_req->mapping, skb_headlen(skb),
-			       skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags))) {
+			       address->ah, qpn, tx_req, phead, hlen))) {
 		ipoib_warn(priv, "post_send failed\n");
 		++dev->stats.tx_errors;
 		ipoib_dma_unmap_tx(priv->ca, tx_req);
@@ -540,7 +594,7 @@ static void ipoib_pkey_dev_check_presence(struct net_device *dev)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	u16 pkey_index = 0;
 
-	if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
+	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
 		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
 	else
 		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
@@ -781,13 +835,13 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
 		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
 		ipoib_ib_dev_down(dev, 0);
 		ipoib_ib_dev_stop(dev, 0);
-		ipoib_pkey_dev_delay_open(dev);
-		return;
+		if (ipoib_pkey_dev_delay_open(dev))
+			return;
 	}
-	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
 
 	/* restart QP only if P_Key index is changed */
-	if (new_index == priv->pkey_index) {
+	if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
+	    new_index == priv->pkey_index) {
 		ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
 		return;
 	}
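test_and_set_bit() atomically sets the bit and returns its previous value, so the rewrite folds the old unconditional set_bit() into the test itself: the QP restart is skipped only when the P_Key was already assigned before this flush and its index is unchanged, whereas a P_Key that has just reappeared (bit previously clear) always forces the restart, even at the same index. A minimal sketch of the primitive (bit number illustrative):

#include <linux/bitops.h>

static unsigned long state;

static int mark_ready(void)
{
	/* nonzero iff bit 0 was already set before this call */
	return test_and_set_bit(0, &state);
}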
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 57282048865c..bd07f02cf02b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -359,8 +359,7 @@ void ipoib_flush_paths(struct net_device *dev)
 	spin_lock_irq(&priv->tx_lock);
 	spin_lock(&priv->lock);
 
-	list_splice(&priv->path_list, &remove_list);
-	INIT_LIST_HEAD(&priv->path_list);
+	list_splice_init(&priv->path_list, &remove_list);
 
 	list_for_each_entry(path, &remove_list, list)
 		rb_erase(&path->rb_node, &priv->path_tree);
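list_splice_init() is the one-call form of exactly this pattern: it splices the source list onto the destination and reinitializes the otherwise-dangling source head, which plain list_splice() leaves untouched. A minimal sketch (names illustrative):

#include <linux/list.h>

/* move every entry from @src onto @dst, leaving @src empty but valid */
static void drain_to(struct list_head *src, struct list_head *dst)
{
	list_splice_init(src, dst);
}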
@@ -952,6 +951,8 @@ static void ipoib_setup(struct net_device *dev)
 	dev->set_multicast_list = ipoib_set_mcast_list;
 	dev->neigh_setup	 = ipoib_neigh_setup_dev;
 
+	ipoib_set_ethtool_ops(dev);
+
 	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);
 
 	dev->watchdog_timeo	 = HZ;
@@ -1105,6 +1106,7 @@ static struct net_device *ipoib_add_port(const char *format,
1105 struct ib_device *hca, u8 port) 1106 struct ib_device *hca, u8 port)
1106{ 1107{
1107 struct ipoib_dev_priv *priv; 1108 struct ipoib_dev_priv *priv;
1109 struct ib_device_attr *device_attr;
1108 int result = -ENOMEM; 1110 int result = -ENOMEM;
1109 1111
1110 priv = ipoib_intf_alloc(format); 1112 priv = ipoib_intf_alloc(format);
@@ -1120,6 +1122,29 @@ static struct net_device *ipoib_add_port(const char *format,
1120 goto device_init_failed; 1122 goto device_init_failed;
1121 } 1123 }
1122 1124
1125 device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
1126 if (!device_attr) {
1127 printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
1128 hca->name, sizeof *device_attr);
1129 goto device_init_failed;
1130 }
1131
1132 result = ib_query_device(hca, device_attr);
1133 if (result) {
1134 printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
1135 hca->name, result);
1136 kfree(device_attr);
1137 goto device_init_failed;
1138 }
1139 priv->hca_caps = device_attr->device_cap_flags;
1140
1141 kfree(device_attr);
1142
1143 if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
1144 set_bit(IPOIB_FLAG_CSUM, &priv->flags);
1145 priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1146 }
1147
1123 /* 1148 /*
1124 * Set the full membership bit, so that we join the right 1149 * Set the full membership bit, so that we join the right
1125 * broadcast group, etc. 1150 * broadcast group, etc.
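
The device attributes are queried through a heap-allocated struct,
presumably because struct ib_device_attr is too large to sit
comfortably on the kernel stack. The shape of the pattern, condensed
with a shortened local name:

        struct ib_device_attr *attr;
        int err;

        attr = kmalloc(sizeof(*attr), GFP_KERNEL);
        if (!attr)
                return -ENOMEM;
        err = ib_query_device(hca, attr);
        if (!err)
                priv->hca_caps = attr->device_cap_flags;
        kfree(attr);    /* only the copied caps are kept */
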
@@ -1137,7 +1162,6 @@ static struct net_device *ipoib_add_port(const char *format,
1137 } else 1162 } else
1138 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); 1163 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
1139 1164
1140
1141 result = ipoib_dev_init(priv->dev, hca, port); 1165 result = ipoib_dev_init(priv->dev, hca, port);
1142 if (result < 0) { 1166 if (result < 0) {
1143 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", 1167 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
@@ -1155,6 +1179,9 @@ static struct net_device *ipoib_add_port(const char *format,
1155 goto event_failed; 1179 goto event_failed;
1156 } 1180 }
1157 1181
1182 if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
1183 priv->dev->features |= NETIF_F_TSO;
1184
1158 result = register_netdev(priv->dev); 1185 result = register_netdev(priv->dev);
1159 if (result) { 1186 if (result) {
1160 printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n", 1187 printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
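
Note the ordering of the feature bits: NETIF_F_SG was granted further
up only together with NETIF_F_IP_CSUM (the stack will not
scatter/gather without checksum offload), and TSO in turn is only
advertised once S/G is on, since a segmented skb arrives as a header
plus page fragments. Condensed:

        if ((priv->dev->features & NETIF_F_SG) &&
            (priv->hca_caps & IB_DEVICE_UD_TSO))
                priv->dev->features |= NETIF_F_TSO;
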
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index a3aeb911f024..8a20e3742c43 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -192,6 +192,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
192 init_attr.send_cq = priv->cq; 192 init_attr.send_cq = priv->cq;
193 init_attr.recv_cq = priv->cq; 193 init_attr.recv_cq = priv->cq;
194 194
195 if (priv->hca_caps & IB_DEVICE_UD_TSO)
196 init_attr.create_flags = IB_QP_CREATE_IPOIB_UD_LSO;
197
195 if (dev->features & NETIF_F_SG) 198 if (dev->features & NETIF_F_SG)
196 init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1; 199 init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
197 200
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 83247f1fdf72..08dc81c46f41 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -405,7 +405,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
405 struct iser_dto *send_dto = NULL; 405 struct iser_dto *send_dto = NULL;
406 unsigned long buf_offset; 406 unsigned long buf_offset;
407 unsigned long data_seg_len; 407 unsigned long data_seg_len;
408 unsigned int itt; 408 uint32_t itt;
409 int err = 0; 409 int err = 0;
410 410
411 if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) { 411 if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
@@ -416,7 +416,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
416 if (iser_check_xmit(conn, ctask)) 416 if (iser_check_xmit(conn, ctask))
417 return -ENOBUFS; 417 return -ENOBUFS;
418 418
419 itt = ntohl(hdr->itt); 419 itt = (__force uint32_t)hdr->itt;
420 data_seg_len = ntoh24(hdr->dlength); 420 data_seg_len = ntoh24(hdr->dlength);
421 buf_offset = ntohl(hdr->offset); 421 buf_offset = ntohl(hdr->offset);
422 422
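
The itt fix is semantic: the initiator task tag is an opaque cookie
that the target echoes back verbatim, so the old ntohl() swap was
spurious. The (__force uint32_t) cast keeps the raw wire value while
telling sparse that escaping the __bitwise wire type is intentional.
Sketch:

        uint32_t itt;

        /* no byte swap: the tag is only compared, never interpreted */
        itt = (__force uint32_t)hdr->itt;
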
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 993f0a8ff28f..d19cfe605ebb 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -473,11 +473,8 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
473 iser_connect_error(cma_id); 473 iser_connect_error(cma_id);
474 break; 474 break;
475 case RDMA_CM_EVENT_DISCONNECTED: 475 case RDMA_CM_EVENT_DISCONNECTED:
476 iser_disconnected_handler(cma_id);
477 break;
478 case RDMA_CM_EVENT_DEVICE_REMOVAL: 476 case RDMA_CM_EVENT_DEVICE_REMOVAL:
479 iser_err("Device removal is currently unsupported\n"); 477 iser_disconnected_handler(cma_id);
480 BUG();
481 break; 478 break;
482 default: 479 default:
483 iser_err("Unexpected RDMA CM event (%d)\n", event->event); 480 iser_err("Unexpected RDMA CM event (%d)\n", event->event);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index fd4a49fc4773..125765aa9d59 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -68,7 +68,7 @@ static int srp_max_iu_len;
68 68
69module_param(srp_sg_tablesize, int, 0444); 69module_param(srp_sg_tablesize, int, 0444);
70MODULE_PARM_DESC(srp_sg_tablesize, 70MODULE_PARM_DESC(srp_sg_tablesize,
71 "Max number of gather/scatter entries per I/O (default is 12)"); 71 "Max number of gather/scatter entries per I/O (default is 12, max 255)");
72 72
73static int topspin_workarounds = 1; 73static int topspin_workarounds = 1;
74 74
@@ -2138,6 +2138,11 @@ static int __init srp_init_module(void)
2138{ 2138{
2139 int ret; 2139 int ret;
2140 2140
2141 if (srp_sg_tablesize > 255) {
2142 printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
2143 srp_sg_tablesize = 255;
2144 }
2145
2141 ib_srp_transport_template = 2146 ib_srp_transport_template =
2142 srp_attach_transport(&ib_srp_transport_functions); 2147 srp_attach_transport(&ib_srp_transport_functions);
2143 if (!ib_srp_transport_template) 2148 if (!ib_srp_transport_template)
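
Module parameters arrive unvalidated, so the clamp runs once in the
init path before anything derives buffer sizes from the value; the
255 ceiling presumably reflects the SRP wire format, which encodes
the descriptor count in a single byte. The same clamp via the kernel
helper:

        #include <linux/kernel.h>

        srp_sg_tablesize = min_t(int, srp_sg_tablesize, 255);
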
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index 6b32ec94b3a8..aa9528779044 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -69,7 +69,7 @@ static void poll_catas(unsigned long dev_ptr)
69 if (readl(priv->catas_err.map)) { 69 if (readl(priv->catas_err.map)) {
70 dump_err_buf(dev); 70 dump_err_buf(dev);
71 71
72 mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0); 72 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
73 73
74 if (internal_err_reset) { 74 if (internal_err_reset) {
75 spin_lock(&catas_lock); 75 spin_lock(&catas_lock);
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
index db49051b97b1..70dff94a8bc6 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/mlx4/cmd.c
@@ -106,7 +106,8 @@ struct mlx4_cmd_context {
106 u16 token; 106 u16 token;
107}; 107};
108 108
109static int mlx4_status_to_errno(u8 status) { 109static int mlx4_status_to_errno(u8 status)
110{
110 static const int trans_table[] = { 111 static const int trans_table[] = {
111 [CMD_STAT_INTERNAL_ERR] = -EIO, 112 [CMD_STAT_INTERNAL_ERR] = -EIO,
112 [CMD_STAT_BAD_OP] = -EPERM, 113 [CMD_STAT_BAD_OP] = -EPERM,
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index d4441fee3d80..caa5bcf54e35 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -38,6 +38,7 @@
38#include <linux/hardirq.h> 38#include <linux/hardirq.h>
39 39
40#include <linux/mlx4/cmd.h> 40#include <linux/mlx4/cmd.h>
41#include <linux/mlx4/cq.h>
41 42
42#include "mlx4.h" 43#include "mlx4.h"
43#include "icm.h" 44#include "icm.h"
@@ -47,21 +48,19 @@ struct mlx4_cq_context {
47 u16 reserved1[3]; 48 u16 reserved1[3];
48 __be16 page_offset; 49 __be16 page_offset;
49 __be32 logsize_usrpage; 50 __be32 logsize_usrpage;
50 u8 reserved2; 51 __be16 cq_period;
51 u8 cq_period; 52 __be16 cq_max_count;
52 u8 reserved3; 53 u8 reserved2[3];
53 u8 cq_max_count;
54 u8 reserved4[3];
55 u8 comp_eqn; 54 u8 comp_eqn;
56 u8 log_page_size; 55 u8 log_page_size;
57 u8 reserved5[2]; 56 u8 reserved3[2];
58 u8 mtt_base_addr_h; 57 u8 mtt_base_addr_h;
59 __be32 mtt_base_addr_l; 58 __be32 mtt_base_addr_l;
60 __be32 last_notified_index; 59 __be32 last_notified_index;
61 __be32 solicit_producer_index; 60 __be32 solicit_producer_index;
62 __be32 consumer_index; 61 __be32 consumer_index;
63 __be32 producer_index; 62 __be32 producer_index;
64 u32 reserved6[2]; 63 u32 reserved4[2];
65 __be64 db_rec_addr; 64 __be64 db_rec_addr;
66}; 65};
67 66
@@ -121,6 +120,13 @@ static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
121 MLX4_CMD_TIME_CLASS_A); 120 MLX4_CMD_TIME_CLASS_A);
122} 121}
123 122
123static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
124 int cq_num, u32 opmod)
125{
126 return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
127 MLX4_CMD_TIME_CLASS_A);
128}
129
124static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 130static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
125 int cq_num) 131 int cq_num)
126{ 132{
@@ -129,6 +135,58 @@ static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
129 MLX4_CMD_TIME_CLASS_A); 135 MLX4_CMD_TIME_CLASS_A);
130} 136}
131 137
138int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
139 u16 count, u16 period)
140{
141 struct mlx4_cmd_mailbox *mailbox;
142 struct mlx4_cq_context *cq_context;
143 int err;
144
145 mailbox = mlx4_alloc_cmd_mailbox(dev);
146 if (IS_ERR(mailbox))
147 return PTR_ERR(mailbox);
148
149 cq_context = mailbox->buf;
150 memset(cq_context, 0, sizeof *cq_context);
151
152 cq_context->cq_max_count = cpu_to_be16(count);
153 cq_context->cq_period = cpu_to_be16(period);
154
155 err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
156
157 mlx4_free_cmd_mailbox(dev, mailbox);
158 return err;
159}
160EXPORT_SYMBOL_GPL(mlx4_cq_modify);
161
162int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
163 int entries, struct mlx4_mtt *mtt)
164{
165 struct mlx4_cmd_mailbox *mailbox;
166 struct mlx4_cq_context *cq_context;
167 u64 mtt_addr;
168 int err;
169
170 mailbox = mlx4_alloc_cmd_mailbox(dev);
171 if (IS_ERR(mailbox))
172 return PTR_ERR(mailbox);
173
174 cq_context = mailbox->buf;
175 memset(cq_context, 0, sizeof *cq_context);
176
177 cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
178 cq_context->log_page_size = mtt->page_shift - 12;
179 mtt_addr = mlx4_mtt_addr(dev, mtt);
180 cq_context->mtt_base_addr_h = mtt_addr >> 32;
181 cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
182
183 err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
184
185 mlx4_free_cmd_mailbox(dev, mailbox);
186 return err;
187}
188EXPORT_SYMBOL_GPL(mlx4_cq_resize);
189
132int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, 190int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
133 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq) 191 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq)
134{ 192{
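
A usage sketch for the new export (illustrative values, not from the
patch): arm the CQ to interrupt after at most 16 completions or one
moderation period, whichever comes first; the period is in
device-defined units.

        err = mlx4_cq_modify(dev, &cq, 16 /* count */, 10 /* period */);
        if (err)
                mlx4_warn(dev, "failed to set CQ moderation\n");
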
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 9c36c2034030..e141a1513f07 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -202,7 +202,10 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
202 break; 202 break;
203 203
204 case MLX4_EVENT_TYPE_PORT_CHANGE: 204 case MLX4_EVENT_TYPE_PORT_CHANGE:
205 mlx4_dispatch_event(dev, eqe->type, eqe->subtype, 205 mlx4_dispatch_event(dev,
206 eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ?
207 MLX4_DEV_EVENT_PORT_UP :
208 MLX4_DEV_EVENT_PORT_DOWN,
206 be32_to_cpu(eqe->event.port_change.port) >> 28); 209 be32_to_cpu(eqe->event.port_change.port) >> 28);
207 break; 210 break;
208 211
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 61dc4951d6b0..d82f2751d2c7 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -133,6 +133,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
133#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27 133#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
134#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 134#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
135#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b 135#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
136#define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d
136#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f 137#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
137#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33 138#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
138#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 139#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
@@ -215,6 +216,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
215 dev_cap->max_requester_per_qp = 1 << (field & 0x3f); 216 dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
216 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET); 217 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
217 dev_cap->max_responder_per_qp = 1 << (field & 0x3f); 218 dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
219 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
220 field &= 0x1f;
221 if (!field)
222 dev_cap->max_gso_sz = 0;
223 else
224 dev_cap->max_gso_sz = 1 << field;
225
218 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET); 226 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
219 dev_cap->max_rdma_global = 1 << (field & 0x3f); 227 dev_cap->max_rdma_global = 1 << (field & 0x3f);
220 MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET); 228 MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
@@ -377,6 +385,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
377 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); 385 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
378 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n", 386 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
379 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg); 387 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
388 mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
380 389
381 dump_dev_cap_flags(dev, dev_cap->flags); 390 dump_dev_cap_flags(dev, dev_cap->flags);
382 391
@@ -696,6 +705,10 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
696 /* Check port for UD address vector: */ 705 /* Check port for UD address vector: */
697 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1); 706 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
698 707
708 /* Enable IPoIB checksumming if we can: */
709 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
710 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
711
699 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 712 /* QPC/EEC/CQC/EQC/RDMARC attributes */
700 713
701 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); 714 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
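
The GSO capability is reported as a log2 value in the low five bits
of the byte at QUERY_DEV_CAP_MAX_GSO_OFFSET, with zero meaning "no
GSO". The decode above, condensed:

        MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
        field &= 0x1f;
        dev_cap->max_gso_sz = field ? 1 << field : 0;
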
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index e16dec890413..306cb9b0242d 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -96,6 +96,7 @@ struct mlx4_dev_cap {
96 u8 bmme_flags; 96 u8 bmme_flags;
97 u32 reserved_lkey; 97 u32 reserved_lkey;
98 u64 max_icm_sz; 98 u64 max_icm_sz;
99 int max_gso_sz;
99}; 100};
100 101
101struct mlx4_adapter { 102struct mlx4_adapter {
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index be5d9e90ccf2..4a6c4d526f1b 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -30,8 +30,6 @@
30 * SOFTWARE. 30 * SOFTWARE.
31 */ 31 */
32 32
33#include <linux/mlx4/driver.h>
34
35#include "mlx4.h" 33#include "mlx4.h"
36 34
37struct mlx4_device_context { 35struct mlx4_device_context {
@@ -113,8 +111,7 @@ void mlx4_unregister_interface(struct mlx4_interface *intf)
113} 111}
114EXPORT_SYMBOL_GPL(mlx4_unregister_interface); 112EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
115 113
116void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type, 114void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port)
117 int subtype, int port)
118{ 115{
119 struct mlx4_priv *priv = mlx4_priv(dev); 116 struct mlx4_priv *priv = mlx4_priv(dev);
120 struct mlx4_device_context *dev_ctx; 117 struct mlx4_device_context *dev_ctx;
@@ -124,8 +121,7 @@ void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
124 121
125 list_for_each_entry(dev_ctx, &priv->ctx_list, list) 122 list_for_each_entry(dev_ctx, &priv->ctx_list, list)
126 if (dev_ctx->intf->event) 123 if (dev_ctx->intf->event)
127 dev_ctx->intf->event(dev, dev_ctx->context, type, 124 dev_ctx->intf->event(dev, dev_ctx->context, type, port);
128 subtype, port);
129 125
130 spin_unlock_irqrestore(&priv->ctx_lock, flags); 126 spin_unlock_irqrestore(&priv->ctx_lock, flags);
131} 127}
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 08bfc130a33e..49a4acab5e82 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -76,7 +76,7 @@ static char mlx4_version[] __devinitdata =
76 DRV_VERSION " (" DRV_RELDATE ")\n"; 76 DRV_VERSION " (" DRV_RELDATE ")\n";
77 77
78static struct mlx4_profile default_profile = { 78static struct mlx4_profile default_profile = {
79 .num_qp = 1 << 16, 79 .num_qp = 1 << 17,
80 .num_srq = 1 << 16, 80 .num_srq = 1 << 16,
81 .rdmarc_per_qp = 1 << 4, 81 .rdmarc_per_qp = 1 << 4,
82 .num_cq = 1 << 16, 82 .num_cq = 1 << 16,
@@ -159,6 +159,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
159 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 159 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
160 dev->caps.flags = dev_cap->flags; 160 dev->caps.flags = dev_cap->flags;
161 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 161 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
162 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
162 163
163 return 0; 164 return 0;
164} 165}
@@ -735,8 +736,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
735 } 736 }
736 737
737 /* 738 /*
738 * Check for BARs. We expect 0: 1MB, 2: 8MB, 4: DDR (may not 739 * Check for BARs. We expect 0: 1MB
739 * be present)
740 */ 740 */
741 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || 741 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
742 pci_resource_len(pdev, 0) != 1 << 20) { 742 pci_resource_len(pdev, 0) != 1 << 20) {
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index a99e7729d333..57f7f1f0d4ec 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -190,10 +190,6 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
190 } 190 }
191 index += dev->caps.num_mgms; 191 index += dev->caps.num_mgms;
192 192
193 err = mlx4_READ_MCG(dev, index, mailbox);
194 if (err)
195 goto out;
196
197 memset(mgm, 0, sizeof *mgm); 193 memset(mgm, 0, sizeof *mgm);
198 memcpy(mgm->gid, gid, 16); 194 memcpy(mgm->gid, gid, 16);
199 } 195 }
@@ -301,12 +297,10 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
301 mgm->qp[loc] = mgm->qp[i - 1]; 297 mgm->qp[loc] = mgm->qp[i - 1];
302 mgm->qp[i - 1] = 0; 298 mgm->qp[i - 1] = 0;
303 299
304 err = mlx4_WRITE_MCG(dev, index, mailbox); 300 if (i != 1) {
305 if (err) 301 err = mlx4_WRITE_MCG(dev, index, mailbox);
306 goto out;
307
308 if (i != 1)
309 goto out; 302 goto out;
303 }
310 304
311 if (prev == -1) { 305 if (prev == -1) {
312 /* Remove entry from MGM */ 306 /* Remove entry from MGM */
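
Two pieces of dead work disappear from the MCG path: a freshly taken
entry is rebuilt from scratch (memset plus memcpy), so the prior
mlx4_READ_MCG() was never needed, and on detach the entry is written
back only while members remain:

        if (i != 1) {                   /* other QPs still attached */
                err = mlx4_WRITE_MCG(dev, index, mailbox);
                goto out;
        }
        /* i == 1: last member gone -- fall through and unlink the
         * whole MGM entry instead of writing back an empty one
         */
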
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 53a1cdddfc13..73336810e652 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -42,6 +42,7 @@
42#include <linux/timer.h> 42#include <linux/timer.h>
43 43
44#include <linux/mlx4/device.h> 44#include <linux/mlx4/device.h>
45#include <linux/mlx4/driver.h>
45#include <linux/mlx4/doorbell.h> 46#include <linux/mlx4/doorbell.h>
46 47
47#define DRV_NAME "mlx4_core" 48#define DRV_NAME "mlx4_core"
@@ -313,8 +314,7 @@ void mlx4_catas_cleanup(void);
313int mlx4_restart_one(struct pci_dev *pdev); 314int mlx4_restart_one(struct pci_dev *pdev);
314int mlx4_register_device(struct mlx4_dev *dev); 315int mlx4_register_device(struct mlx4_dev *dev);
315void mlx4_unregister_device(struct mlx4_dev *dev); 316void mlx4_unregister_device(struct mlx4_dev *dev);
316void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type, 317void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port);
317 int subtype, int port);
318 318
319struct mlx4_dev_cap; 319struct mlx4_dev_cap;
320struct mlx4_init_hca_param; 320struct mlx4_init_hca_param;
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index e879b212cf43..07883197f474 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -20,6 +20,7 @@ config DCSSBLK
20config DASD 20config DASD
21 tristate "Support for DASD devices" 21 tristate "Support for DASD devices"
22 depends on CCW && BLOCK 22 depends on CCW && BLOCK
23 select IOSCHED_DEADLINE
23 help 24 help
24 Enable this option if you want to access DASDs directly utilizing 25 Enable this option if you want to access DASDs directly utilizing
25 S/390s channel subsystem commands. This is necessary for running 26 S/390s channel subsystem commands. This is necessary for running
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index ccf46c96adb4..ac6d4d3218b3 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -980,12 +980,12 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
980 break; 980 break;
981 case -ETIMEDOUT: 981 case -ETIMEDOUT:
982 printk(KERN_WARNING"%s(%s): request timed out\n", 982 printk(KERN_WARNING"%s(%s): request timed out\n",
983 __FUNCTION__, cdev->dev.bus_id); 983 __func__, cdev->dev.bus_id);
984 //FIXME - dasd uses own timeout interface... 984 //FIXME - dasd uses own timeout interface...
985 break; 985 break;
986 default: 986 default:
987 printk(KERN_WARNING"%s(%s): unknown error %ld\n", 987 printk(KERN_WARNING"%s(%s): unknown error %ld\n",
988 __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb)); 988 __func__, cdev->dev.bus_id, PTR_ERR(irb));
989 } 989 }
990 return; 990 return;
991 } 991 }
@@ -1956,6 +1956,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
1956 block->request_queue->queuedata = block; 1956 block->request_queue->queuedata = block;
1957 1957
1958 elevator_exit(block->request_queue->elevator); 1958 elevator_exit(block->request_queue->elevator);
1959 block->request_queue->elevator = NULL;
1959 rc = elevator_init(block->request_queue, "deadline"); 1960 rc = elevator_init(block->request_queue, "deadline");
1960 if (rc) { 1961 if (rc) {
1961 blk_cleanup_queue(block->request_queue); 1962 blk_cleanup_queue(block->request_queue);
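
Clearing request_queue->elevator between elevator_exit() and
elevator_init() reads as defensive hygiene: should elevator_init()
fail, the queue no longer carries a dangling pointer to the
already-exited elevator when blk_cleanup_queue() tears it down
(a plausible reading; the patch itself does not say).

        elevator_exit(q->elevator);
        q->elevator = NULL;             /* no stale pointer on failure */
        rc = elevator_init(q, "deadline");
        if (rc)
                blk_cleanup_queue(q);   /* safe: elevator already gone */
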
@@ -2298,9 +2299,8 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
2298 * in the other openers. 2299 * in the other openers.
2299 */ 2300 */
2300 if (device->block) { 2301 if (device->block) {
2301 struct dasd_block *block = device->block; 2302 max_count = device->block->bdev ? 0 : -1;
2302 max_count = block->bdev ? 0 : -1; 2303 open_count = atomic_read(&device->block->open_count);
2303 open_count = (int) atomic_read(&block->open_count);
2304 if (open_count > max_count) { 2304 if (open_count > max_count) {
2305 if (open_count > 0) 2305 if (open_count > 0)
2306 printk(KERN_WARNING "Can't offline dasd " 2306 printk(KERN_WARNING "Can't offline dasd "
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index b19db20a0bef..e6700df52df4 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1996,6 +1996,36 @@ dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense)
1996} /* end dasd_3990_erp_compound */ 1996} /* end dasd_3990_erp_compound */
1997 1997
1998/* 1998/*
1999 *DASD_3990_ERP_HANDLE_SIM
2000 *
2001 *DESCRIPTION
2002 * inspects the SIM SENSE data and starts an appropriate action
2003 *
2004 * PARAMETER
2005 * sense sense data of the actual error
2006 *
2007 * RETURN VALUES
2008 * none
2009 */
2010void
2011dasd_3990_erp_handle_sim(struct dasd_device *device, char *sense)
2012{
2013 /* print message according to log or message to operator mode */
2014 if ((sense[24] & DASD_SIM_MSG_TO_OP) || (sense[1] & 0x10)) {
2015
2016 /* print SIM SRC from RefCode */
2017 DEV_MESSAGE(KERN_ERR, device, "SIM - SRC: "
2018 "%02x%02x%02x%02x", sense[22],
2019 sense[23], sense[11], sense[12]);
2020 } else if (sense[24] & DASD_SIM_LOG) {
2021 /* print SIM SRC Refcode */
2022 DEV_MESSAGE(KERN_WARNING, device, "SIM - SRC: "
2023 "%02x%02x%02x%02x", sense[22],
2024 sense[23], sense[11], sense[12]);
2025 }
2026}
2027
2028/*
1999 * DASD_3990_ERP_INSPECT_32 2029 * DASD_3990_ERP_INSPECT_32
2000 * 2030 *
2001 * DESCRIPTION 2031 * DESCRIPTION
@@ -2018,6 +2048,10 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
2018 2048
2019 erp->function = dasd_3990_erp_inspect_32; 2049 erp->function = dasd_3990_erp_inspect_32;
2020 2050
2051 /* check for SIM sense data */
2052 if ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)
2053 dasd_3990_erp_handle_sim(device, sense);
2054
2021 if (sense[25] & DASD_SENSE_BIT_0) { 2055 if (sense[25] & DASD_SENSE_BIT_0) {
2022 2056
2023 /* compound program action codes (byte25 bit 0 == '1') */ 2057 /* compound program action codes (byte25 bit 0 == '1') */
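
The SIM detection uses masked equality rather than an any-bit test:
with DASD_SIM_SENSE defined as 0x0F (see dasd_int.h below), the
expression is true only when all four low bits of sense byte 6 are
set. Spelled out:

        /* (sense[6] & 0x0F) == 0x0F: bits 0-3 must all be set;
         * (sense[6] & 0x0F) != 0 would match any single bit
         */
        if ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)
                dasd_3990_erp_handle_sim(device, sense);
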
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 3a40bee9d358..2d8df0b30538 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -745,6 +745,19 @@ static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
745 spin_unlock_irqrestore(&lcu->lock, flags); 745 spin_unlock_irqrestore(&lcu->lock, flags);
746} 746}
747 747
748static void __stop_device_on_lcu(struct dasd_device *device,
749 struct dasd_device *pos)
750{
751 /* If pos == device then device is already locked! */
752 if (pos == device) {
753 pos->stopped |= DASD_STOPPED_SU;
754 return;
755 }
756 spin_lock(get_ccwdev_lock(pos->cdev));
757 pos->stopped |= DASD_STOPPED_SU;
758 spin_unlock(get_ccwdev_lock(pos->cdev));
759}
760
748/* 761/*
749 * This function is called in interrupt context, so the 762 * This function is called in interrupt context, so the
750 * cdev lock for device is already locked! 763 * cdev lock for device is already locked!
@@ -755,35 +768,15 @@ static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
755 struct alias_pav_group *pavgroup; 768 struct alias_pav_group *pavgroup;
756 struct dasd_device *pos; 769 struct dasd_device *pos;
757 770
758 list_for_each_entry(pos, &lcu->active_devices, alias_list) { 771 list_for_each_entry(pos, &lcu->active_devices, alias_list)
759 if (pos != device) 772 __stop_device_on_lcu(device, pos);
760 spin_lock(get_ccwdev_lock(pos->cdev)); 773 list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
761 pos->stopped |= DASD_STOPPED_SU; 774 __stop_device_on_lcu(device, pos);
762 if (pos != device)
763 spin_unlock(get_ccwdev_lock(pos->cdev));
764 }
765 list_for_each_entry(pos, &lcu->inactive_devices, alias_list) {
766 if (pos != device)
767 spin_lock(get_ccwdev_lock(pos->cdev));
768 pos->stopped |= DASD_STOPPED_SU;
769 if (pos != device)
770 spin_unlock(get_ccwdev_lock(pos->cdev));
771 }
772 list_for_each_entry(pavgroup, &lcu->grouplist, group) { 775 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
773 list_for_each_entry(pos, &pavgroup->baselist, alias_list) { 776 list_for_each_entry(pos, &pavgroup->baselist, alias_list)
774 if (pos != device) 777 __stop_device_on_lcu(device, pos);
775 spin_lock(get_ccwdev_lock(pos->cdev)); 778 list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
776 pos->stopped |= DASD_STOPPED_SU; 779 __stop_device_on_lcu(device, pos);
777 if (pos != device)
778 spin_unlock(get_ccwdev_lock(pos->cdev));
779 }
780 list_for_each_entry(pos, &pavgroup->aliaslist, alias_list) {
781 if (pos != device)
782 spin_lock(get_ccwdev_lock(pos->cdev));
783 pos->stopped |= DASD_STOPPED_SU;
784 if (pos != device)
785 spin_unlock(get_ccwdev_lock(pos->cdev));
786 }
787 } 780 }
788} 781}
789 782
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 61f16937c1e0..a0edae091b5e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1415,6 +1415,13 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1415 return; 1415 return;
1416 } 1416 }
1417 1417
1418
1419 /* service information message SIM */
1420 if ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE) {
1421 dasd_3990_erp_handle_sim(device, irb->ecw);
1422 return;
1423 }
1424
1418 /* just report other unsolicited interrupts */ 1425 /* just report other unsolicited interrupts */
1419 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1426 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1420 "unsolicited interrupt received"); 1427 "unsolicited interrupt received");
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index d13ea05089a7..116611583df8 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -125,7 +125,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
125 125
126 private = (struct dasd_fba_private *) device->private; 126 private = (struct dasd_fba_private *) device->private;
127 if (private == NULL) { 127 if (private == NULL) {
128 private = kzalloc(sizeof(struct dasd_fba_private), GFP_KERNEL); 128 private = kzalloc(sizeof(struct dasd_fba_private),
129 GFP_KERNEL | GFP_DMA);
129 if (private == NULL) { 130 if (private == NULL) {
130 DEV_MESSAGE(KERN_WARNING, device, "%s", 131 DEV_MESSAGE(KERN_WARNING, device, "%s",
131 "memory allocation failed for private " 132 "memory allocation failed for private "
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 44b2984dfbee..6c624bf44617 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -72,6 +72,11 @@ struct dasd_block;
72#define DASD_SENSE_BIT_2 0x20 72#define DASD_SENSE_BIT_2 0x20
73#define DASD_SENSE_BIT_3 0x10 73#define DASD_SENSE_BIT_3 0x10
74 74
75/* BIT DEFINITIONS FOR SIM SENSE */
76#define DASD_SIM_SENSE 0x0F
77#define DASD_SIM_MSG_TO_OP 0x03
78#define DASD_SIM_LOG 0x0C
79
75/* 80/*
76 * SECTION: MACROs for klogd and s390 debug feature (dbf) 81 * SECTION: MACROs for klogd and s390 debug feature (dbf)
77 */ 82 */
@@ -621,6 +626,7 @@ void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
621 626
622/* externals in dasd_3990_erp.c */ 627/* externals in dasd_3990_erp.c */
623struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *); 628struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
629void dasd_3990_erp_handle_sim(struct dasd_device *, char *);
624 630
625/* externals in dasd_eer.c */ 631/* externals in dasd_eer.c */
626#ifdef CONFIG_DASD_EER 632#ifdef CONFIG_DASD_EER
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index e6c94dbfdeaa..04787eab1016 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -142,57 +142,6 @@ dcssblk_get_device_by_name(char *name)
142 return NULL; 142 return NULL;
143} 143}
144 144
145/*
146 * print appropriate error message for segment_load()/segment_type()
147 * return code
148 */
149static void
150dcssblk_segment_warn(int rc, char* seg_name)
151{
152 switch (rc) {
153 case -ENOENT:
154 PRINT_WARN("cannot load/query segment %s, does not exist\n",
155 seg_name);
156 break;
157 case -ENOSYS:
158 PRINT_WARN("cannot load/query segment %s, not running on VM\n",
159 seg_name);
160 break;
161 case -EIO:
162 PRINT_WARN("cannot load/query segment %s, hardware error\n",
163 seg_name);
164 break;
165 case -ENOTSUPP:
166 PRINT_WARN("cannot load/query segment %s, is a multi-part "
167 "segment\n", seg_name);
168 break;
169 case -ENOSPC:
170 PRINT_WARN("cannot load/query segment %s, overlaps with "
171 "storage\n", seg_name);
172 break;
173 case -EBUSY:
174 PRINT_WARN("cannot load/query segment %s, overlaps with "
175 "already loaded dcss\n", seg_name);
176 break;
177 case -EPERM:
178 PRINT_WARN("cannot load/query segment %s, already loaded in "
179 "incompatible mode\n", seg_name);
180 break;
181 case -ENOMEM:
182 PRINT_WARN("cannot load/query segment %s, out of memory\n",
183 seg_name);
184 break;
185 case -ERANGE:
186 PRINT_WARN("cannot load/query segment %s, exceeds kernel "
187 "mapping range\n", seg_name);
188 break;
189 default:
190 PRINT_WARN("cannot load/query segment %s, return value %i\n",
191 seg_name, rc);
192 break;
193 }
194}
195
196static void dcssblk_unregister_callback(struct device *dev) 145static void dcssblk_unregister_callback(struct device *dev)
197{ 146{
198 device_unregister(dev); 147 device_unregister(dev);
@@ -423,7 +372,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
423 rc = segment_load(local_buf, SEGMENT_SHARED, 372 rc = segment_load(local_buf, SEGMENT_SHARED,
424 &dev_info->start, &dev_info->end); 373 &dev_info->start, &dev_info->end);
425 if (rc < 0) { 374 if (rc < 0) {
426 dcssblk_segment_warn(rc, dev_info->segment_name); 375 segment_warning(rc, dev_info->segment_name);
427 goto dealloc_gendisk; 376 goto dealloc_gendisk;
428 } 377 }
429 seg_byte_size = (dev_info->end - dev_info->start + 1); 378 seg_byte_size = (dev_info->end - dev_info->start + 1);
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 67009bfa093e..1e1f50655bbf 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -111,56 +111,6 @@ static void dcss_mkname(char *ascii_name, char *ebcdic_name)
111 ASCEBC(ebcdic_name, 8); 111 ASCEBC(ebcdic_name, 8);
112} 112}
113 113
114/*
115 * print appropriate error message for segment_load()/segment_type()
116 * return code
117 */
118static void mon_segment_warn(int rc, char* seg_name)
119{
120 switch (rc) {
121 case -ENOENT:
122 P_WARNING("cannot load/query segment %s, does not exist\n",
123 seg_name);
124 break;
125 case -ENOSYS:
126 P_WARNING("cannot load/query segment %s, not running on VM\n",
127 seg_name);
128 break;
129 case -EIO:
130 P_WARNING("cannot load/query segment %s, hardware error\n",
131 seg_name);
132 break;
133 case -ENOTSUPP:
134 P_WARNING("cannot load/query segment %s, is a multi-part "
135 "segment\n", seg_name);
136 break;
137 case -ENOSPC:
138 P_WARNING("cannot load/query segment %s, overlaps with "
139 "storage\n", seg_name);
140 break;
141 case -EBUSY:
142 P_WARNING("cannot load/query segment %s, overlaps with "
143 "already loaded dcss\n", seg_name);
144 break;
145 case -EPERM:
146 P_WARNING("cannot load/query segment %s, already loaded in "
147 "incompatible mode\n", seg_name);
148 break;
149 case -ENOMEM:
150 P_WARNING("cannot load/query segment %s, out of memory\n",
151 seg_name);
152 break;
153 case -ERANGE:
154 P_WARNING("cannot load/query segment %s, exceeds kernel "
155 "mapping range\n", seg_name);
156 break;
157 default:
158 P_WARNING("cannot load/query segment %s, return value %i\n",
159 seg_name, rc);
160 break;
161 }
162}
163
164static inline unsigned long mon_mca_start(struct mon_msg *monmsg) 114static inline unsigned long mon_mca_start(struct mon_msg *monmsg)
165{ 115{
166 return *(u32 *) &monmsg->msg.rmmsg; 116 return *(u32 *) &monmsg->msg.rmmsg;
@@ -585,7 +535,7 @@ static int __init mon_init(void)
585 535
586 rc = segment_type(mon_dcss_name); 536 rc = segment_type(mon_dcss_name);
587 if (rc < 0) { 537 if (rc < 0) {
588 mon_segment_warn(rc, mon_dcss_name); 538 segment_warning(rc, mon_dcss_name);
589 goto out_iucv; 539 goto out_iucv;
590 } 540 }
591 if (rc != SEG_TYPE_SC) { 541 if (rc != SEG_TYPE_SC) {
@@ -598,7 +548,7 @@ static int __init mon_init(void)
598 rc = segment_load(mon_dcss_name, SEGMENT_SHARED, 548 rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
599 &mon_dcss_start, &mon_dcss_end); 549 &mon_dcss_start, &mon_dcss_end);
600 if (rc < 0) { 550 if (rc < 0) {
601 mon_segment_warn(rc, mon_dcss_name); 551 segment_warning(rc, mon_dcss_name);
602 rc = -EINVAL; 552 rc = -EINVAL;
603 goto out_iucv; 553 goto out_iucv;
604 } 554 }
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 2e616e33891d..e3b3d390b4a3 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -332,7 +332,7 @@ sclp_tty_write_string(const unsigned char *str, int count)
332 if (sclp_ttybuf == NULL) { 332 if (sclp_ttybuf == NULL) {
333 while (list_empty(&sclp_tty_pages)) { 333 while (list_empty(&sclp_tty_pages)) {
334 spin_unlock_irqrestore(&sclp_tty_lock, flags); 334 spin_unlock_irqrestore(&sclp_tty_lock, flags);
335 if (in_atomic()) 335 if (in_interrupt())
336 sclp_sync_wait(); 336 sclp_sync_wait();
337 else 337 else
338 wait_event(sclp_tty_waitq, 338 wait_event(sclp_tty_waitq,
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index f7b258dfd52c..ed507594e62b 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -383,7 +383,7 @@ sclp_vt220_timeout(unsigned long data)
383 */ 383 */
384static int 384static int
385__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule, 385__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
386 int convertlf) 386 int convertlf, int may_schedule)
387{ 387{
388 unsigned long flags; 388 unsigned long flags;
389 void *page; 389 void *page;
@@ -398,9 +398,8 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
398 /* Create a sclp output buffer if none exists yet */ 398 /* Create a sclp output buffer if none exists yet */
399 if (sclp_vt220_current_request == NULL) { 399 if (sclp_vt220_current_request == NULL) {
400 while (list_empty(&sclp_vt220_empty)) { 400 while (list_empty(&sclp_vt220_empty)) {
401 spin_unlock_irqrestore(&sclp_vt220_lock, 401 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
402 flags); 402 if (in_interrupt() || !may_schedule)
403 if (in_atomic())
404 sclp_sync_wait(); 403 sclp_sync_wait();
405 else 404 else
406 wait_event(sclp_vt220_waitq, 405 wait_event(sclp_vt220_waitq,
@@ -450,7 +449,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
450static int 449static int
451sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count) 450sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
452{ 451{
453 return __sclp_vt220_write(buf, count, 1, 0); 452 return __sclp_vt220_write(buf, count, 1, 0, 1);
454} 453}
455 454
456#define SCLP_VT220_SESSION_ENDED 0x01 455#define SCLP_VT220_SESSION_ENDED 0x01
@@ -529,7 +528,7 @@ sclp_vt220_close(struct tty_struct *tty, struct file *filp)
529static void 528static void
530sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch) 529sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
531{ 530{
532 __sclp_vt220_write(&ch, 1, 0, 0); 531 __sclp_vt220_write(&ch, 1, 0, 0, 1);
533} 532}
534 533
535/* 534/*
@@ -746,7 +745,7 @@ __initcall(sclp_vt220_tty_init);
746static void 745static void
747sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count) 746sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
748{ 747{
749 __sclp_vt220_write((const unsigned char *) buf, count, 1, 1); 748 __sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
750} 749}
751 750
752static struct tty_driver * 751static struct tty_driver *
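
Both sclp paths stop trusting in_atomic(): on kernels without
CONFIG_PREEMPT, spinlocked sections do not raise the preempt count,
so in_atomic() cannot reliably tell whether sleeping is allowed. The
vt220 writer therefore takes an explicit may_schedule flag (the
console path passes 0, since console output can come from any
context) and the decision becomes, in sketch form:

        /* busy-wait unless we are in process context and the caller
         * said sleeping is allowed
         */
        if (in_interrupt() || !may_schedule)
                sclp_sync_wait();
        else
                wait_event(sclp_vt220_waitq,
                           !list_empty(&sclp_vt220_empty));
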
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 5b47e9cce75f..874adf365e46 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -394,7 +394,7 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
394 return tape_34xx_erp_failed(request, -ENOSPC); 394 return tape_34xx_erp_failed(request, -ENOSPC);
395 default: 395 default:
396 PRINT_ERR("Invalid op in %s:%i\n", 396 PRINT_ERR("Invalid op in %s:%i\n",
397 __FUNCTION__, __LINE__); 397 __func__, __LINE__);
398 return tape_34xx_erp_failed(request, 0); 398 return tape_34xx_erp_failed(request, 0);
399 } 399 }
400 } 400 }
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index b830a8cbef78..ebe84067bae9 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -83,9 +83,9 @@ tapechar_setup_device(struct tape_device * device)
83void 83void
84tapechar_cleanup_device(struct tape_device *device) 84tapechar_cleanup_device(struct tape_device *device)
85{ 85{
86 unregister_tape_dev(device->rt); 86 unregister_tape_dev(&device->cdev->dev, device->rt);
87 device->rt = NULL; 87 device->rt = NULL;
88 unregister_tape_dev(device->nt); 88 unregister_tape_dev(&device->cdev->dev, device->nt);
89 device->nt = NULL; 89 device->nt = NULL;
90} 90}
91 91
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index aa7f166f4034..6dfdb7c17981 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -99,11 +99,10 @@ fail_with_tcd:
99} 99}
100EXPORT_SYMBOL(register_tape_dev); 100EXPORT_SYMBOL(register_tape_dev);
101 101
102void unregister_tape_dev(struct tape_class_device *tcd) 102void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
103{ 103{
104 if (tcd != NULL && !IS_ERR(tcd)) { 104 if (tcd != NULL && !IS_ERR(tcd)) {
105 sysfs_remove_link(&tcd->class_device->kobj, 105 sysfs_remove_link(&device->kobj, tcd->mode_name);
106 tcd->mode_name);
107 device_destroy(tape_class, tcd->char_device->dev); 106 device_destroy(tape_class, tcd->char_device->dev);
108 cdev_del(tcd->char_device); 107 cdev_del(tcd->char_device);
109 kfree(tcd); 108 kfree(tcd);
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
index e2b5ac918acf..707b7f48c232 100644
--- a/drivers/s390/char/tape_class.h
+++ b/drivers/s390/char/tape_class.h
@@ -56,6 +56,6 @@ struct tape_class_device *register_tape_dev(
56 char * device_name, 56 char * device_name,
57 char * node_name 57 char * node_name
58); 58);
59void unregister_tape_dev(struct tape_class_device *tcd); 59void unregister_tape_dev(struct device *device, struct tape_class_device *tcd);
60 60
61#endif /* __TAPE_CLASS_H__ */ 61#endif /* __TAPE_CLASS_H__ */
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 7689b500a104..83ae9a852f00 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -100,7 +100,8 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev)
100 urd->reclen = cdev->id.driver_info; 100 urd->reclen = cdev->id.driver_info;
101 ccw_device_get_id(cdev, &urd->dev_id); 101 ccw_device_get_id(cdev, &urd->dev_id);
102 mutex_init(&urd->io_mutex); 102 mutex_init(&urd->io_mutex);
103 mutex_init(&urd->open_mutex); 103 init_waitqueue_head(&urd->wait);
104 spin_lock_init(&urd->open_lock);
104 atomic_set(&urd->ref_count, 1); 105 atomic_set(&urd->ref_count, 1);
105 urd->cdev = cdev; 106 urd->cdev = cdev;
106 get_device(&cdev->dev); 107 get_device(&cdev->dev);
@@ -678,17 +679,21 @@ static int ur_open(struct inode *inode, struct file *file)
678 if (!urd) 679 if (!urd)
679 return -ENXIO; 680 return -ENXIO;
680 681
681 if (file->f_flags & O_NONBLOCK) { 682 spin_lock(&urd->open_lock);
682 if (!mutex_trylock(&urd->open_mutex)) { 683 while (urd->open_flag) {
684 spin_unlock(&urd->open_lock);
685 if (file->f_flags & O_NONBLOCK) {
683 rc = -EBUSY; 686 rc = -EBUSY;
684 goto fail_put; 687 goto fail_put;
685 } 688 }
686 } else { 689 if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
687 if (mutex_lock_interruptible(&urd->open_mutex)) {
688 rc = -ERESTARTSYS; 690 rc = -ERESTARTSYS;
689 goto fail_put; 691 goto fail_put;
690 } 692 }
693 spin_lock(&urd->open_lock);
691 } 694 }
695 urd->open_flag++;
696 spin_unlock(&urd->open_lock);
692 697
693 TRACE("ur_open\n"); 698 TRACE("ur_open\n");
694 699
@@ -720,7 +725,9 @@ static int ur_open(struct inode *inode, struct file *file)
720fail_urfile_free: 725fail_urfile_free:
721 urfile_free(urf); 726 urfile_free(urf);
722fail_unlock: 727fail_unlock:
723 mutex_unlock(&urd->open_mutex); 728 spin_lock(&urd->open_lock);
729 urd->open_flag--;
730 spin_unlock(&urd->open_lock);
724fail_put: 731fail_put:
725 urdev_put(urd); 732 urdev_put(urd);
726 return rc; 733 return rc;
@@ -731,7 +738,10 @@ static int ur_release(struct inode *inode, struct file *file)
731 struct urfile *urf = file->private_data; 738 struct urfile *urf = file->private_data;
732 739
733 TRACE("ur_release\n"); 740 TRACE("ur_release\n");
734 mutex_unlock(&urf->urd->open_mutex); 741 spin_lock(&urf->urd->open_lock);
742 urf->urd->open_flag--;
743 spin_unlock(&urf->urd->open_lock);
744 wake_up_interruptible(&urf->urd->wait);
735 urdev_put(urf->urd); 745 urdev_put(urf->urd);
736 urfile_free(urf); 746 urfile_free(urf);
737 return 0; 747 return 0;
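
The vmur rework replaces a mutex that was taken in open() and
released in release(). A mutex must be unlocked by the task that
locked it, which cannot be guaranteed across system calls, so
exclusivity is rebuilt from a flag, a spinlock and a wait queue; the
open side, condensed:

        spin_lock(&urd->open_lock);
        while (urd->open_flag) {
                spin_unlock(&urd->open_lock);
                if (file->f_flags & O_NONBLOCK)
                        return -EBUSY;          /* don't wait */
                if (wait_event_interruptible(urd->wait,
                                             urd->open_flag == 0))
                        return -ERESTARTSYS;
                spin_lock(&urd->open_lock);
        }
        urd->open_flag++;               /* we own the device now */
        spin_unlock(&urd->open_lock);

release() decrements the flag under the same lock and wakes the
queue, as the hunk above shows.
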
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
index fa959644735a..fa320ad4593d 100644
--- a/drivers/s390/char/vmur.h
+++ b/drivers/s390/char/vmur.h
@@ -62,7 +62,6 @@ struct file_control_block {
62struct urdev { 62struct urdev {
63 struct ccw_device *cdev; /* Backpointer to ccw device */ 63 struct ccw_device *cdev; /* Backpointer to ccw device */
64 struct mutex io_mutex; /* Serialises device IO */ 64 struct mutex io_mutex; /* Serialises device IO */
65 struct mutex open_mutex; /* Serialises access to device */
66 struct completion *io_done; /* do_ur_io waits; irq completes */ 65 struct completion *io_done; /* do_ur_io waits; irq completes */
67 struct device *device; 66 struct device *device;
68 struct cdev *char_device; 67 struct cdev *char_device;
@@ -71,6 +70,9 @@ struct urdev {
71 int class; /* VM device class */ 70 int class; /* VM device class */
72 int io_request_rc; /* return code from I/O request */ 71 int io_request_rc; /* return code from I/O request */
73 atomic_t ref_count; /* reference counter */ 72 atomic_t ref_count; /* reference counter */
73 wait_queue_head_t wait; /* wait queue to serialize open */
74 int open_flag; /* "urdev is open" flag */
75 spinlock_t open_lock; /* serialize critical sections */
74}; 76};
75 77
76/* 78/*
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 6f40facb1c4d..19f8389291b6 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -96,7 +96,7 @@ static int vmwdt_keepalive(void)
96 96
97 if (ret) { 97 if (ret) {
98 printk(KERN_WARNING "%s: problem setting interval %d, " 98 printk(KERN_WARNING "%s: problem setting interval %d, "
99 "cmd %s\n", __FUNCTION__, vmwdt_interval, 99 "cmd %s\n", __func__, vmwdt_interval,
100 vmwdt_cmd); 100 vmwdt_cmd);
101 } 101 }
102 return ret; 102 return ret;
@@ -107,7 +107,7 @@ static int vmwdt_disable(void)
107 int ret = __diag288(wdt_cancel, 0, "", 0); 107 int ret = __diag288(wdt_cancel, 0, "", 0);
108 if (ret) { 108 if (ret) {
109 printk(KERN_WARNING "%s: problem disabling watchdog\n", 109 printk(KERN_WARNING "%s: problem disabling watchdog\n",
110 __FUNCTION__); 110 __func__);
111 } 111 }
112 return ret; 112 return ret;
113} 113}
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index f523501e6e6c..bbbd14e9d48f 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -224,7 +224,7 @@ static int __init init_cpu_info(enum arch_id arch)
224 224
225 sa = kmalloc(sizeof(*sa), GFP_KERNEL); 225 sa = kmalloc(sizeof(*sa), GFP_KERNEL);
226 if (!sa) { 226 if (!sa) {
227 ERROR_MSG("kmalloc failed: %s: %i\n",__FUNCTION__, __LINE__); 227 ERROR_MSG("kmalloc failed: %s: %i\n",__func__, __LINE__);
228 return -ENOMEM; 228 return -ENOMEM;
229 } 229 }
230 if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { 230 if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 007aaeb4f532..5de86908b0d0 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -217,6 +217,8 @@ void chsc_chp_offline(struct chp_id chpid)
217 217
218 if (chp_get_status(chpid) <= 0) 218 if (chp_get_status(chpid) <= 0)
219 return; 219 return;
220 /* Wait until previous actions have settled. */
221 css_wait_for_slow_path();
220 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid); 222 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
221} 223}
222 224
@@ -303,7 +305,8 @@ static void s390_process_res_acc (struct res_acc_data *res_data)
303 sprintf(dbf_txt, "fla%x", res_data->fla); 305 sprintf(dbf_txt, "fla%x", res_data->fla);
304 CIO_TRACE_EVENT( 2, dbf_txt); 306 CIO_TRACE_EVENT( 2, dbf_txt);
305 } 307 }
306 308 /* Wait until previous actions have settled. */
309 css_wait_for_slow_path();
307 /* 310 /*
308 * I/O resources may have become accessible. 311 * I/O resources may have become accessible.
309 * Scan through all subchannels that may be concerned and 312 * Scan through all subchannels that may be concerned and
@@ -561,9 +564,12 @@ void chsc_chp_online(struct chp_id chpid)
561 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); 564 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
562 CIO_TRACE_EVENT(2, dbf_txt); 565 CIO_TRACE_EVENT(2, dbf_txt);
563 566
564 if (chp_get_status(chpid) != 0) 567 if (chp_get_status(chpid) != 0) {
568 /* Wait until previous actions have settled. */
569 css_wait_for_slow_path();
565 for_each_subchannel_staged(__chp_add, __chp_add_new_sch, 570 for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
566 &chpid); 571 &chpid);
572 }
567} 573}
568 574
569static void __s390_subchannel_vary_chpid(struct subchannel *sch, 575static void __s390_subchannel_vary_chpid(struct subchannel *sch,
@@ -650,6 +656,8 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
650 */ 656 */
651int chsc_chp_vary(struct chp_id chpid, int on) 657int chsc_chp_vary(struct chp_id chpid, int on)
652{ 658{
659 /* Wait until previous actions have settled. */
660 css_wait_for_slow_path();
653 /* 661 /*
654 * Redo PathVerification on the devices the chpid connects to 662 * Redo PathVerification on the devices the chpid connects to
655 */ 663 */
@@ -758,7 +766,6 @@ chsc_secm(struct channel_subsystem *css, int enable)
758 if (!secm_area) 766 if (!secm_area)
759 return -ENOMEM; 767 return -ENOMEM;
760 768
761 mutex_lock(&css->mutex);
762 if (enable && !css->cm_enabled) { 769 if (enable && !css->cm_enabled) {
763 css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 770 css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
764 css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 771 css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
@@ -766,7 +773,6 @@ chsc_secm(struct channel_subsystem *css, int enable)
766 free_page((unsigned long)css->cub_addr1); 773 free_page((unsigned long)css->cub_addr1);
767 free_page((unsigned long)css->cub_addr2); 774 free_page((unsigned long)css->cub_addr2);
768 free_page((unsigned long)secm_area); 775 free_page((unsigned long)secm_area);
769 mutex_unlock(&css->mutex);
770 return -ENOMEM; 776 return -ENOMEM;
771 } 777 }
772 } 778 }
@@ -787,7 +793,6 @@ chsc_secm(struct channel_subsystem *css, int enable)
787 free_page((unsigned long)css->cub_addr1); 793 free_page((unsigned long)css->cub_addr1);
788 free_page((unsigned long)css->cub_addr2); 794 free_page((unsigned long)css->cub_addr2);
789 } 795 }
790 mutex_unlock(&css->mutex);
791 free_page((unsigned long)secm_area); 796 free_page((unsigned long)secm_area);
792 return ret; 797 return ret;
793} 798}
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 60590a12d529..23ffcc4768a7 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -24,6 +24,7 @@
24#include <asm/ipl.h> 24#include <asm/ipl.h>
25#include <asm/chpid.h> 25#include <asm/chpid.h>
26#include <asm/airq.h> 26#include <asm/airq.h>
27#include <asm/cpu.h>
27#include "cio.h" 28#include "cio.h"
28#include "css.h" 29#include "css.h"
29#include "chsc.h" 30#include "chsc.h"
@@ -649,13 +650,10 @@ do_IRQ (struct pt_regs *regs)
649 650
650 old_regs = set_irq_regs(regs); 651 old_regs = set_irq_regs(regs);
651 irq_enter(); 652 irq_enter();
652 asm volatile ("mc 0,0"); 653 s390_idle_check();
653 if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer) 654 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
654 /** 655 /* Serve timer interrupts first. */
655 * Make sure that the i/o interrupt did not "overtake" 656 clock_comparator_work();
656 * the last HZ timer interrupt.
657 */
658 account_ticks(S390_lowcore.int_clock);
659 /* 657 /*
660 * Get interrupt information from lowcore 658 * Get interrupt information from lowcore
661 */ 659 */
@@ -672,10 +670,14 @@ do_IRQ (struct pt_regs *regs)
672 continue; 670 continue;
673 } 671 }
674 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 672 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
675 if (sch) 673 if (!sch) {
676 spin_lock(sch->lock); 674 /* Clear pending interrupt condition. */
675 tsch(tpi_info->schid, irb);
676 continue;
677 }
678 spin_lock(sch->lock);
677 /* Store interrupt response block to lowcore. */ 679 /* Store interrupt response block to lowcore. */
678 if (tsch (tpi_info->schid, irb) == 0 && sch) { 680 if (tsch(tpi_info->schid, irb) == 0) {
679 /* Keep subchannel information word up to date. */ 681 /* Keep subchannel information word up to date. */
680 memcpy (&sch->schib.scsw, &irb->scsw, 682 memcpy (&sch->schib.scsw, &irb->scsw,
681 sizeof (irb->scsw)); 683 sizeof (irb->scsw));
@@ -683,8 +685,7 @@ do_IRQ (struct pt_regs *regs)
683 if (sch->driver && sch->driver->irq) 685 if (sch->driver && sch->driver->irq)
684 sch->driver->irq(sch); 686 sch->driver->irq(sch);
685 } 687 }
686 if (sch) 688 spin_unlock(sch->lock);
687 spin_unlock(sch->lock);
688 /* 689 /*
689 * Are more interrupts pending? 690 * Are more interrupts pending?
690 * If so, the tpi instruction will update the lowcore 691 * If so, the tpi instruction will update the lowcore
@@ -710,8 +711,9 @@ void *cio_get_console_priv(void)
710/* 711/*
711 * busy wait for the next interrupt on the console 712 * busy wait for the next interrupt on the console
712 */ 713 */
713void 714void wait_cons_dev(void)
714wait_cons_dev (void) 715 __releases(console_subchannel.lock)
716 __acquires(console_subchannel.lock)
715{ 717{
716 unsigned long cr6 __attribute__ ((aligned (8))); 718 unsigned long cr6 __attribute__ ((aligned (8)));
717 unsigned long save_cr6 __attribute__ ((aligned (8))); 719 unsigned long save_cr6 __attribute__ ((aligned (8)));
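
The __releases/__acquires markers are sparse context annotations:
they document that wait_cons_dev() temporarily drops a lock it is
entered with, so sparse's lock-balance checking does not flag the
function. The general form of such a declaration:

        void wait_cons_dev(void)
                __releases(console_subchannel.lock)
                __acquires(console_subchannel.lock);
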
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 52afa4c784de..08f2235c5a6f 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -100,6 +100,7 @@ extern int cio_modify (struct subchannel *);
100 100
101int cio_create_sch_lock(struct subchannel *); 101int cio_create_sch_lock(struct subchannel *);
102void do_adapter_IO(void); 102void do_adapter_IO(void);
103void do_IRQ(struct pt_regs *);
103 104
104/* Use with care. */ 105/* Use with care. */
105#ifdef CONFIG_CCW_CONSOLE 106#ifdef CONFIG_CCW_CONSOLE
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 3b45bbe6cce0..c1afab5f72d6 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -533,6 +533,12 @@ void css_schedule_eval_all(void)
533 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 533 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
534} 534}
535 535
536void css_wait_for_slow_path(void)
537{
538 flush_workqueue(ccw_device_notify_work);
539 flush_workqueue(slow_path_wq);
540}
541
536/* Reprobe subchannel if unregistered. */ 542/* Reprobe subchannel if unregistered. */
537static int reprobe_subchannel(struct subchannel_id schid, void *data) 543static int reprobe_subchannel(struct subchannel_id schid, void *data)
538{ 544{
@@ -683,10 +689,14 @@ css_cm_enable_show(struct device *dev, struct device_attribute *attr,
683 char *buf) 689 char *buf)
684{ 690{
685 struct channel_subsystem *css = to_css(dev); 691 struct channel_subsystem *css = to_css(dev);
692 int ret;
686 693
687 if (!css) 694 if (!css)
688 return 0; 695 return 0;
689 return sprintf(buf, "%x\n", css->cm_enabled); 696 mutex_lock(&css->mutex);
697 ret = sprintf(buf, "%x\n", css->cm_enabled);
698 mutex_unlock(&css->mutex);
699 return ret;
690} 700}
691 701
692static ssize_t 702static ssize_t
@@ -696,6 +706,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
696 struct channel_subsystem *css = to_css(dev); 706 struct channel_subsystem *css = to_css(dev);
697 int ret; 707 int ret;
698 708
709 mutex_lock(&css->mutex);
699 switch (buf[0]) { 710 switch (buf[0]) {
700 case '0': 711 case '0':
701 ret = css->cm_enabled ? chsc_secm(css, 0) : 0; 712 ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
@@ -706,6 +717,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
706 default: 717 default:
707 ret = -EINVAL; 718 ret = -EINVAL;
708 } 719 }
720 mutex_unlock(&css->mutex);
709 return ret < 0 ? ret : count; 721 return ret < 0 ? ret : count;
710} 722}
711 723
@@ -752,9 +764,11 @@ static int css_reboot_event(struct notifier_block *this,
752 struct channel_subsystem *css; 764 struct channel_subsystem *css;
753 765
754 css = channel_subsystems[i]; 766 css = channel_subsystems[i];
767 mutex_lock(&css->mutex);
755 if (css->cm_enabled) 768 if (css->cm_enabled)
756 if (chsc_secm(css, 0)) 769 if (chsc_secm(css, 0))
757 ret = NOTIFY_BAD; 770 ret = NOTIFY_BAD;
771 mutex_unlock(&css->mutex);
758 } 772 }
759 773
760 return ret; 774 return ret;
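The css.c hunks all apply one pattern: every reader and writer of css->cm_enabled now holds css->mutex, so a concurrent cm_enable store can no longer race with the sysfs show path or the reboot notifier. Reduced to its essentials, with invented names, the locked show/store pair looks like this sketch:

static ssize_t enabled_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct demo *d = to_demo(dev);
	ssize_t ret;

	mutex_lock(&d->lock);		/* snapshot the state under the lock */
	ret = sprintf(buf, "%x\n", d->enabled);
	mutex_unlock(&d->lock);
	return ret;
}

static ssize_t enabled_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct demo *d = to_demo(dev);
	int ret;

	mutex_lock(&d->lock);		/* serialize against show and notifier */
	ret = demo_set_enabled(d, buf[0] == '1');
	mutex_unlock(&d->lock);
	return ret < 0 ? ret : count;
}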
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index b70554523552..e1913518f354 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -144,6 +144,7 @@ struct schib;
144int css_sch_is_valid(struct schib *); 144int css_sch_is_valid(struct schib *);
145 145
146extern struct workqueue_struct *slow_path_wq; 146extern struct workqueue_struct *slow_path_wq;
147void css_wait_for_slow_path(void);
147 148
148extern struct attribute_group *subch_attr_groups[]; 149extern struct attribute_group *subch_attr_groups[];
149#endif 150#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index fec004f62bcf..e0c7adb8958e 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -577,7 +577,6 @@ static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
577static DEVICE_ATTR(cutype, 0444, cutype_show, NULL); 577static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
578static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); 578static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
579static DEVICE_ATTR(online, 0644, online_show, online_store); 579static DEVICE_ATTR(online, 0644, online_show, online_store);
580extern struct device_attribute dev_attr_cmb_enable;
581static DEVICE_ATTR(availability, 0444, available_show, NULL); 580static DEVICE_ATTR(availability, 0444, available_show, NULL);
582 581
583static struct attribute * subch_attrs[] = { 582static struct attribute * subch_attrs[] = {
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index d40a2ffaa000..cb08092be39f 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -127,4 +127,5 @@ extern struct bus_type ccw_bus_type;
127void retry_set_schib(struct ccw_device *cdev); 127void retry_set_schib(struct ccw_device *cdev);
128void cmf_retry_copy_block(struct ccw_device *); 128void cmf_retry_copy_block(struct ccw_device *);
129int cmf_reenable(struct ccw_device *); 129int cmf_reenable(struct ccw_device *);
130extern struct device_attribute dev_attr_cmb_enable;
130#endif 131#endif
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 49b58eb0fab8..a1718a0aa539 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -193,8 +193,15 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
193 return -EACCES; 193 return -EACCES;
194 } 194 }
195 ret = cio_start_key (sch, cpa, lpm, key); 195 ret = cio_start_key (sch, cpa, lpm, key);
196 if (ret == 0) 196 switch (ret) {
197 case 0:
197 cdev->private->intparm = intparm; 198 cdev->private->intparm = intparm;
199 break;
200 case -EACCES:
201 case -ENODEV:
202 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
203 break;
204 }
198 return ret; 205 return ret;
199} 206}
200 207
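The ccw_device_start_key() change converts the lone success check into a switch so that path errors immediately drive the device state machine: -EACCES and -ENODEV from cio_start_key() now raise DEV_EVENT_VERIFY, forcing a path re-verification instead of just bubbling the error up to the caller. Distilled, with the surrounding validity checks omitted:

ret = cio_start_key(sch, cpa, lpm, key);
switch (ret) {
case 0:			/* I/O started, remember the caller's token */
	cdev->private->intparm = intparm;
	break;
case -EACCES:		/* a path went away or is fenced ... */
case -ENODEV:		/* ... so re-verify the paths to the device */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	break;
}
return ret;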
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index ebe0848cfe33..4a38993000f2 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -62,7 +62,7 @@ ccw_device_path_notoper(struct ccw_device *cdev)
62 stsch (sch->schid, &sch->schib); 62 stsch (sch->schid, &sch->schib);
63 63
64 CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are " 64 CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
65 "not operational \n", __FUNCTION__, 65 "not operational \n", __func__,
66 sch->schid.ssid, sch->schid.sch_no, 66 sch->schid.ssid, sch->schid.sch_no,
67 sch->schib.pmcw.pnom); 67 sch->schib.pmcw.pnom);
68 68
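The __FUNCTION__ to __func__ replacements here (and in zcrypt_api.h further down) swap a gcc-specific spelling for the identical C99 predefined identifier; the generated output is unchanged. For example:

void demo(void)
{
	/* __func__ is standard C99; __FUNCTION__ was a gcc extension. */
	printk(KERN_DEBUG "%s: entered\n", __func__);
}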
@@ -312,6 +312,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
312{ 312{
313 struct subchannel *sch; 313 struct subchannel *sch;
314 struct ccw1 *sense_ccw; 314 struct ccw1 *sense_ccw;
315 int rc;
315 316
316 sch = to_subchannel(cdev->dev.parent); 317 sch = to_subchannel(cdev->dev.parent);
317 318
@@ -337,7 +338,10 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
337 /* Reset internal retry indication. */ 338 /* Reset internal retry indication. */
338 cdev->private->flags.intretry = 0; 339 cdev->private->flags.intretry = 0;
339 340
340 return cio_start(sch, sense_ccw, 0xff); 341 rc = cio_start(sch, sense_ccw, 0xff);
342 if (rc == -ENODEV || rc == -EACCES)
343 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
344 return rc;
341} 345}
342 346
343/* 347/*
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 2b5bfb7c69e5..c359386708e9 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -1399,7 +1399,7 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1399 * q->dev_st_chg_ind is the indicator, be it shared or not. 1399 * q->dev_st_chg_ind is the indicator, be it shared or not.
1400 * only clear it, if indicator is non-shared 1400 * only clear it, if indicator is non-shared
1401 */ 1401 */
1402 if (!spare_ind_was_set) 1402 if (q->dev_st_chg_ind != &spare_indicator)
1403 tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind); 1403 tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
1404 1404
1405 if (q->hydra_gives_outbound_pcis) { 1405 if (q->hydra_gives_outbound_pcis) {
@@ -2217,9 +2217,78 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
2217 return cc; 2217 return cc;
2218} 2218}
2219 2219
2220static int
2221qdio_get_ssqd_information(struct subchannel_id *schid,
2222 struct qdio_chsc_ssqd **ssqd_area)
2223{
2224 int result;
2225
2226 QDIO_DBF_TEXT0(0, setup, "getssqd");
2227 *ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2228	if (!*ssqd_area) {
2229 QDIO_PRINT_WARN("Could not get memory for chsc on sch x%x.\n",
2230 schid->sch_no);
2231 return -ENOMEM;
2232 }
2233
2234 (*ssqd_area)->request = (struct chsc_header) {
2235 .length = 0x0010,
2236 .code = 0x0024,
2237 };
2238 (*ssqd_area)->first_sch = schid->sch_no;
2239 (*ssqd_area)->last_sch = schid->sch_no;
2240 (*ssqd_area)->ssid = schid->ssid;
2241 result = chsc(*ssqd_area);
2242
2243 if (result) {
2244 QDIO_PRINT_WARN("CHSC returned cc %i on sch 0.%x.%x.\n",
2245 result, schid->ssid, schid->sch_no);
2246 goto out;
2247 }
2248
2249 if ((*ssqd_area)->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
2250 QDIO_PRINT_WARN("CHSC response is 0x%x on sch 0.%x.%x.\n",
2251 (*ssqd_area)->response.code,
2252 schid->ssid, schid->sch_no);
2253 goto out;
2254 }
2255 if (!((*ssqd_area)->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
2256 !((*ssqd_area)->flags & CHSC_FLAG_VALIDITY) ||
2257 ((*ssqd_area)->sch != schid->sch_no)) {
2258 QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
2259 "using all SIGAs.\n",
2260 schid->ssid, schid->sch_no);
2261 goto out;
2262 }
2263 return 0;
2264out:
2265 return -EINVAL;
2266}
2267
2268int
2269qdio_get_ssqd_pct(struct ccw_device *cdev)
2270{
2271 struct qdio_chsc_ssqd *ssqd_area;
2272 struct subchannel_id schid;
2273 char dbf_text[15];
2274 int rc;
2275 int pct = 0;
2276
2277 QDIO_DBF_TEXT0(0, setup, "getpct");
2278 schid = ccw_device_get_subchannel_id(cdev);
2279 rc = qdio_get_ssqd_information(&schid, &ssqd_area);
2280 if (!rc)
2281 pct = (int)ssqd_area->pct;
2282 if (rc != -ENOMEM)
2283 mempool_free(ssqd_area, qdio_mempool_scssc);
2284 sprintf(dbf_text, "pct: %d", pct);
2285 QDIO_DBF_TEXT2(0, setup, dbf_text);
2286 return pct;
2287}
2288EXPORT_SYMBOL(qdio_get_ssqd_pct);
2289
2220static void 2290static void
2221qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac, 2291qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned long token)
2222 unsigned long token)
2223{ 2292{
2224 struct qdio_q *q; 2293 struct qdio_q *q;
2225 int i; 2294 int i;
@@ -2227,7 +2296,7 @@ qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
2227 char dbf_text[15]; 2296 char dbf_text[15];
2228 2297
2229 /*check if QEBSM is disabled */ 2298 /*check if QEBSM is disabled */
2230 if (!(irq_ptr->is_qebsm) || !(qdioac & 0x01)) { 2299 if (!(irq_ptr->is_qebsm) || !(irq_ptr->qdioac & 0x01)) {
2231 irq_ptr->is_qebsm = 0; 2300 irq_ptr->is_qebsm = 0;
2232 irq_ptr->sch_token = 0; 2301 irq_ptr->sch_token = 0;
2233 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; 2302 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
@@ -2256,102 +2325,27 @@ qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
2256} 2325}
2257 2326
2258static void 2327static void
2259qdio_get_ssqd_information(struct qdio_irq *irq_ptr) 2328qdio_get_ssqd_siga(struct qdio_irq *irq_ptr)
2260{ 2329{
2261 int result; 2330 int rc;
2262 unsigned char qdioac; 2331 struct qdio_chsc_ssqd *ssqd_area;
2263 struct {
2264 struct chsc_header request;
2265 u16 reserved1:10;
2266 u16 ssid:2;
2267 u16 fmt:4;
2268 u16 first_sch;
2269 u16 reserved2;
2270 u16 last_sch;
2271 u32 reserved3;
2272 struct chsc_header response;
2273 u32 reserved4;
2274 u8 flags;
2275 u8 reserved5;
2276 u16 sch;
2277 u8 qfmt;
2278 u8 parm;
2279 u8 qdioac1;
2280 u8 sch_class;
2281 u8 reserved7;
2282 u8 icnt;
2283 u8 reserved8;
2284 u8 ocnt;
2285 u8 reserved9;
2286 u8 mbccnt;
2287 u16 qdioac2;
2288 u64 sch_token;
2289 } *ssqd_area;
2290 2332
2291 QDIO_DBF_TEXT0(0,setup,"getssqd"); 2333 QDIO_DBF_TEXT0(0,setup,"getssqd");
2292 qdioac = 0; 2334 irq_ptr->qdioac = 0;
2293 ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC); 2335 rc = qdio_get_ssqd_information(&irq_ptr->schid, &ssqd_area);
2294 if (!ssqd_area) { 2336 if (rc) {
2295 QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \ 2337 QDIO_PRINT_WARN("using all SIGAs for sch x%x.\n",
2296 "SIGAs for sch x%x.\n", irq_ptr->schid.sch_no); 2338 irq_ptr->schid.sch_no);
2297 irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | 2339 irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2298 CHSC_FLAG_SIGA_OUTPUT_NECESSARY | 2340 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2299 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ 2341 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2300 irq_ptr->is_qebsm = 0; 2342 irq_ptr->is_qebsm = 0;
2301 irq_ptr->sch_token = 0; 2343 } else
2302 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; 2344 irq_ptr->qdioac = ssqd_area->qdioac1;
2303 return;
2304 }
2305
2306 ssqd_area->request = (struct chsc_header) {
2307 .length = 0x0010,
2308 .code = 0x0024,
2309 };
2310 ssqd_area->first_sch = irq_ptr->schid.sch_no;
2311 ssqd_area->last_sch = irq_ptr->schid.sch_no;
2312 ssqd_area->ssid = irq_ptr->schid.ssid;
2313 result = chsc(ssqd_area);
2314
2315 if (result) {
2316 QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
2317 "SIGAs for sch 0.%x.%x.\n", result,
2318 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2319 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2320 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2321 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2322 irq_ptr->is_qebsm = 0;
2323 goto out;
2324 }
2325 2345
2326 if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) { 2346 qdio_check_subchannel_qebsm(irq_ptr, ssqd_area->sch_token);
2327 QDIO_PRINT_WARN("response upon checking SIGA needs " \ 2347 if (rc != -ENOMEM)
2328 "is 0x%x. Using all SIGAs for sch 0.%x.%x.\n", 2348 mempool_free(ssqd_area, qdio_mempool_scssc);
2329 ssqd_area->response.code,
2330 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2331 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2332 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2333 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2334 irq_ptr->is_qebsm = 0;
2335 goto out;
2336 }
2337 if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
2338 !(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
2339 (ssqd_area->sch != irq_ptr->schid.sch_no)) {
2340 QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
2341 "using all SIGAs.\n",
2342 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2343 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2344 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2345 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
2346 irq_ptr->is_qebsm = 0;
2347 goto out;
2348 }
2349 qdioac = ssqd_area->qdioac1;
2350out:
2351 qdio_check_subchannel_qebsm(irq_ptr, qdioac,
2352 ssqd_area->sch_token);
2353 mempool_free(ssqd_area, qdio_mempool_scssc);
2354 irq_ptr->qdioac = qdioac;
2355} 2349}
2356 2350
2357static unsigned int 2351static unsigned int
@@ -3227,7 +3221,7 @@ qdio_establish(struct qdio_initialize *init_data)
3227 return -EIO; 3221 return -EIO;
3228 } 3222 }
3229 3223
3230 qdio_get_ssqd_information(irq_ptr); 3224 qdio_get_ssqd_siga(irq_ptr);
3231 /* if this gets set once, we're running under VM and can omit SVSes */ 3225 /* if this gets set once, we're running under VM and can omit SVSes */
3232 if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY) 3226 if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
3233 omit_svs=1; 3227 omit_svs=1;
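The qdio.c refactoring splits the old monolithic SSQD query into a generic helper, qdio_get_ssqd_information(), which runs the CHSC and fills a shared struct qdio_chsc_ssqd buffer, plus two thin consumers: qdio_get_ssqd_siga() for the SIGA flags and the newly exported qdio_get_ssqd_pct() for the path connection table value. Note the "if (rc != -ENOMEM)" guard in both consumers: the buffer is only handed back to the mempool when the allocation in the helper actually succeeded. A qdio user could then ask for the pct like this (sketch; the surrounding driver structure is assumed):

static int demo_probe(struct ccw_device *cdev)
{
	int pct = qdio_get_ssqd_pct(cdev);

	/* qdio_get_ssqd_pct() returns 0 if the CHSC query failed. */
	printk(KERN_INFO "subchannel reports pct=%d\n", pct);
	return 0;
}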
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index da8a272fd75b..c3df6b2c38b7 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -406,6 +406,34 @@ do_clear_global_summary(void)
406#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08 406#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
407#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04 407#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
408 408
409struct qdio_chsc_ssqd {
410 struct chsc_header request;
411 u16 reserved1:10;
412 u16 ssid:2;
413 u16 fmt:4;
414 u16 first_sch;
415 u16 reserved2;
416 u16 last_sch;
417 u32 reserved3;
418 struct chsc_header response;
419 u32 reserved4;
420 u8 flags;
421 u8 reserved5;
422 u16 sch;
423 u8 qfmt;
424 u8 parm;
425 u8 qdioac1;
426 u8 sch_class;
427 u8 pct;
428 u8 icnt;
429 u8 reserved7;
430 u8 ocnt;
431 u8 reserved8;
432 u8 mbccnt;
433 u16 qdioac2;
434 u64 sch_token;
435};
436
409struct qdio_perf_stats { 437struct qdio_perf_stats {
410#ifdef CONFIG_64BIT 438#ifdef CONFIG_64BIT
411 atomic64_t tl_runs; 439 atomic64_t tl_runs;
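Hoisting the anonymous on-stack structure into struct qdio_chsc_ssqd in qdio.h lets both call sites share one definition of the CHSC store-subchannel-QDIO-data block, and exposes the pct field that qdio_get_ssqd_pct() reports. Filling a request then reduces to this sketch, mirroring the fields shown above:

struct qdio_chsc_ssqd *area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);

if (area) {
	area->request = (struct chsc_header) {
		.length = 0x0010,	/* request block length */
		.code	= 0x0024,	/* store subchannel QDIO data */
	};
	area->first_sch = schid->sch_no;	/* query one subchannel */
	area->last_sch	= schid->sch_no;
	area->ssid	= schid->ssid;
	/* chsc(area) then issues the channel subsystem call */
}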
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 7b0b81901297..a1ab3e3efd11 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -45,7 +45,7 @@ static int ap_poll_thread_start(void);
45static void ap_poll_thread_stop(void); 45static void ap_poll_thread_stop(void);
46static void ap_request_timeout(unsigned long); 46static void ap_request_timeout(unsigned long);
47 47
48/** 48/*
49 * Module description. 49 * Module description.
50 */ 50 */
51MODULE_AUTHOR("IBM Corporation"); 51MODULE_AUTHOR("IBM Corporation");
@@ -53,7 +53,7 @@ MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
53 "Copyright 2006 IBM Corporation"); 53 "Copyright 2006 IBM Corporation");
54MODULE_LICENSE("GPL"); 54MODULE_LICENSE("GPL");
55 55
56/** 56/*
57 * Module parameter 57 * Module parameter
58 */ 58 */
59int ap_domain_index = -1; /* Adjunct Processor Domain Index */ 59int ap_domain_index = -1; /* Adjunct Processor Domain Index */
@@ -69,7 +69,7 @@ static struct device *ap_root_device = NULL;
69static DEFINE_SPINLOCK(ap_device_lock); 69static DEFINE_SPINLOCK(ap_device_lock);
70static LIST_HEAD(ap_device_list); 70static LIST_HEAD(ap_device_list);
71 71
72/** 72/*
73 * Workqueue & timer for bus rescan. 73 * Workqueue & timer for bus rescan.
74 */ 74 */
75static struct workqueue_struct *ap_work_queue; 75static struct workqueue_struct *ap_work_queue;
@@ -77,7 +77,7 @@ static struct timer_list ap_config_timer;
77static int ap_config_time = AP_CONFIG_TIME; 77static int ap_config_time = AP_CONFIG_TIME;
78static DECLARE_WORK(ap_config_work, ap_scan_bus); 78static DECLARE_WORK(ap_config_work, ap_scan_bus);
79 79
80/** 80/*
81 * Tasklet & timer for AP request polling. 81 * Tasklet & timer for AP request polling.
82 */ 82 */
83static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0); 83static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0);
@@ -88,9 +88,9 @@ static struct task_struct *ap_poll_kthread = NULL;
88static DEFINE_MUTEX(ap_poll_thread_mutex); 88static DEFINE_MUTEX(ap_poll_thread_mutex);
89 89
90/** 90/**
91 * Test if ap instructions are available. 91 * ap_instructions_available() - Test if AP instructions are available.
92 * 92 *
93 * Returns 0 if the ap instructions are installed. 93 * Returns 0 if the AP instructions are installed.
94 */ 94 */
95static inline int ap_instructions_available(void) 95static inline int ap_instructions_available(void)
96{ 96{
@@ -108,12 +108,12 @@ static inline int ap_instructions_available(void)
108} 108}
109 109
110/** 110/**
111 * Test adjunct processor queue. 111 * ap_test_queue(): Test adjunct processor queue.
112 * @qid: the ap queue number 112 * @qid: The AP queue number
113 * @queue_depth: pointer to queue depth value 113 * @queue_depth: Pointer to queue depth value
114 * @device_type: pointer to device type value 114 * @device_type: Pointer to device type value
115 * 115 *
116 * Returns ap queue status structure. 116 * Returns AP queue status structure.
117 */ 117 */
118static inline struct ap_queue_status 118static inline struct ap_queue_status
119ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type) 119ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
@@ -130,10 +130,10 @@ ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
130} 130}
131 131
132/** 132/**
133 * Reset adjunct processor queue. 133 * ap_reset_queue(): Reset adjunct processor queue.
134 * @qid: the ap queue number 134 * @qid: The AP queue number
135 * 135 *
136 * Returns ap queue status structure. 136 * Returns AP queue status structure.
137 */ 137 */
138static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid) 138static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
139{ 139{
@@ -148,16 +148,14 @@ static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
148} 148}
149 149
150/** 150/**
151 * Send message to adjunct processor queue. 151 * __ap_send(): Send message to adjunct processor queue.
152 * @qid: the ap queue number 152 * @qid: The AP queue number
153 * @psmid: the program supplied message identifier 153 * @psmid: The program supplied message identifier
154 * @msg: the message text 154 * @msg: The message text
155 * @length: the message length 155 * @length: The message length
156 *
157 * Returns ap queue status structure.
158 * 156 *
157 * Returns AP queue status structure.
159 * Condition code 1 on NQAP can't happen because the L bit is 1. 158 * Condition code 1 on NQAP can't happen because the L bit is 1.
160 *
161 * Condition code 2 on NQAP also means the send is incomplete, 159 * Condition code 2 on NQAP also means the send is incomplete,
162 * because a segment boundary was reached. The NQAP is repeated. 160 * because a segment boundary was reached. The NQAP is repeated.
163 */ 161 */
@@ -198,23 +196,20 @@ int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
198} 196}
199EXPORT_SYMBOL(ap_send); 197EXPORT_SYMBOL(ap_send);
200 198
201/* 199/**
202 * Receive message from adjunct processor queue. 200 * __ap_recv(): Receive message from adjunct processor queue.
203 * @qid: the ap queue number 201 * @qid: The AP queue number
204 * @psmid: pointer to program supplied message identifier 202 * @psmid: Pointer to program supplied message identifier
205 * @msg: the message text 203 * @msg: The message text
206 * @length: the message length 204 * @length: The message length
207 *
208 * Returns ap queue status structure.
209 * 205 *
206 * Returns AP queue status structure.
210 * Condition code 1 on DQAP means the receive has taken place 207 * Condition code 1 on DQAP means the receive has taken place
211 * but only partially. The response is incomplete, hence the 208 * but only partially. The response is incomplete, hence the
212 * DQAP is repeated. 209 * DQAP is repeated.
213 *
214 * Condition code 2 on DQAP also means the receive is incomplete, 210 * Condition code 2 on DQAP also means the receive is incomplete,
215 * this time because a segment boundary was reached. Again, the 211 * this time because a segment boundary was reached. Again, the
216 * DQAP is repeated. 212 * DQAP is repeated.
217 *
218 * Note that gpr2 is used by the DQAP instruction to keep track of 213 * Note that gpr2 is used by the DQAP instruction to keep track of
219 * any 'residual' length, in case the instruction gets interrupted. 214 * any 'residual' length, in case the instruction gets interrupted.
220 * Hence it gets zeroed before the instruction. 215 * Hence it gets zeroed before the instruction.
@@ -263,11 +258,12 @@ int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
263EXPORT_SYMBOL(ap_recv); 258EXPORT_SYMBOL(ap_recv);
264 259
265/** 260/**
266 * Check if an AP queue is available. The test is repeated for 261 * ap_query_queue(): Check if an AP queue is available.
267 * AP_MAX_RESET times. 262 * @qid: The AP queue number
268 * @qid: the ap queue number 263 * @queue_depth: Pointer to queue depth value
269 * @queue_depth: pointer to queue depth value 264 * @device_type: Pointer to device type value
270 * @device_type: pointer to device type value 265 *
266 * The test is repeated for AP_MAX_RESET times.
271 */ 267 */
272static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type) 268static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
273{ 269{
@@ -308,8 +304,10 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
308} 304}
309 305
310/** 306/**
307 * ap_init_queue(): Reset an AP queue.
308 * @qid: The AP queue number
309 *
311 * Reset an AP queue and wait for it to become available again. 310 * Reset an AP queue and wait for it to become available again.
312 * @qid: the ap queue number
313 */ 311 */
314static int ap_init_queue(ap_qid_t qid) 312static int ap_init_queue(ap_qid_t qid)
315{ 313{
@@ -346,7 +344,10 @@ static int ap_init_queue(ap_qid_t qid)
346} 344}
347 345
348/** 346/**
349 * Arm request timeout if a AP device was idle and a new request is submitted. 347 * ap_increase_queue_count(): Arm request timeout.
348 * @ap_dev: Pointer to an AP device.
349 *
350 * Arm request timeout if an AP device was idle and a new request is submitted.
350 */ 351 */
351static void ap_increase_queue_count(struct ap_device *ap_dev) 352static void ap_increase_queue_count(struct ap_device *ap_dev)
352{ 353{
@@ -360,7 +361,10 @@ static void ap_increase_queue_count(struct ap_device *ap_dev)
360} 361}
361 362
362/** 363/**
363 * AP device is still alive, re-schedule request timeout if there are still 364 * ap_decrease_queue_count(): Decrease queue count.
365 * @ap_dev: Pointer to an AP device.
366 *
367 * If AP device is still alive, re-schedule request timeout if there are still
364 * pending requests. 368 * pending requests.
365 */ 369 */
366static void ap_decrease_queue_count(struct ap_device *ap_dev) 370static void ap_decrease_queue_count(struct ap_device *ap_dev)
@@ -371,7 +375,7 @@ static void ap_decrease_queue_count(struct ap_device *ap_dev)
371 if (ap_dev->queue_count > 0) 375 if (ap_dev->queue_count > 0)
372 mod_timer(&ap_dev->timeout, jiffies + timeout); 376 mod_timer(&ap_dev->timeout, jiffies + timeout);
373 else 377 else
374 /** 378 /*
375 * The timeout timer should be disabled now - since 379 * The timeout timer should be disabled now - since
376 * del_timer_sync() is very expensive, we just tell via the 380 * del_timer_sync() is very expensive, we just tell via the
377 * reset flag to ignore the pending timeout timer. 381 * reset flag to ignore the pending timeout timer.
@@ -379,7 +383,7 @@ static void ap_decrease_queue_count(struct ap_device *ap_dev)
379 ap_dev->reset = AP_RESET_IGNORE; 383 ap_dev->reset = AP_RESET_IGNORE;
380} 384}
381 385
382/** 386/*
383 * AP device related attributes. 387 * AP device related attributes.
384 */ 388 */
385static ssize_t ap_hwtype_show(struct device *dev, 389static ssize_t ap_hwtype_show(struct device *dev,
@@ -433,6 +437,10 @@ static struct attribute_group ap_dev_attr_group = {
433}; 437};
434 438
435/** 439/**
440 * ap_bus_match()
441 * @dev: Pointer to device
442 * @drv: Pointer to device_driver
443 *
436 * AP bus driver registration/unregistration. 444 * AP bus driver registration/unregistration.
437 */ 445 */
438static int ap_bus_match(struct device *dev, struct device_driver *drv) 446static int ap_bus_match(struct device *dev, struct device_driver *drv)
@@ -441,7 +449,7 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
441 struct ap_driver *ap_drv = to_ap_drv(drv); 449 struct ap_driver *ap_drv = to_ap_drv(drv);
442 struct ap_device_id *id; 450 struct ap_device_id *id;
443 451
444 /** 452 /*
445 * Compare device type of the device with the list of 453 * Compare device type of the device with the list of
446 * supported types of the device_driver. 454 * supported types of the device_driver.
447 */ 455 */
@@ -455,8 +463,12 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
455} 463}
456 464
457/** 465/**
458 * uevent function for AP devices. It sets up a single environment 466 * ap_uevent(): Uevent function for AP devices.
459 * variable DEV_TYPE which contains the hardware device type. 467 * @dev: Pointer to device
468 * @env: Pointer to kobj_uevent_env
469 *
470 * It sets up a single environment variable DEV_TYPE which contains the
471 * hardware device type.
460 */ 472 */
461static int ap_uevent (struct device *dev, struct kobj_uevent_env *env) 473static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
462{ 474{
@@ -500,8 +512,10 @@ static int ap_device_probe(struct device *dev)
500} 512}
501 513
502/** 514/**
515 * __ap_flush_queue(): Flush requests.
516 * @ap_dev: Pointer to the AP device
517 *
503 * Flush all requests from the request/pending queue of an AP device. 518 * Flush all requests from the request/pending queue of an AP device.
504 * @ap_dev: pointer to the AP device.
505 */ 519 */
506static void __ap_flush_queue(struct ap_device *ap_dev) 520static void __ap_flush_queue(struct ap_device *ap_dev)
507{ 521{
@@ -565,7 +579,7 @@ void ap_driver_unregister(struct ap_driver *ap_drv)
565} 579}
566EXPORT_SYMBOL(ap_driver_unregister); 580EXPORT_SYMBOL(ap_driver_unregister);
567 581
568/** 582/*
569 * AP bus attributes. 583 * AP bus attributes.
570 */ 584 */
571static ssize_t ap_domain_show(struct bus_type *bus, char *buf) 585static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
@@ -630,14 +644,16 @@ static struct bus_attribute *const ap_bus_attrs[] = {
630}; 644};
631 645
632/** 646/**
633 * Pick one of the 16 ap domains. 647 * ap_select_domain(): Select an AP domain.
648 *
649 * Pick one of the 16 AP domains.
634 */ 650 */
635static int ap_select_domain(void) 651static int ap_select_domain(void)
636{ 652{
637 int queue_depth, device_type, count, max_count, best_domain; 653 int queue_depth, device_type, count, max_count, best_domain;
638 int rc, i, j; 654 int rc, i, j;
639 655
640 /** 656 /*
641 * We want to use a single domain. Either the one specified with 657 * We want to use a single domain. Either the one specified with
642 * the "domain=" parameter or the domain with the maximum number 658 * the "domain=" parameter or the domain with the maximum number
643 * of devices. 659 * of devices.
@@ -669,8 +685,10 @@ static int ap_select_domain(void)
669} 685}
670 686
671/** 687/**
672 * Find the device type if query queue returned a device type of 0. 688 * ap_probe_device_type(): Find the device type of an AP.
673 * @ap_dev: pointer to the AP device. 689 * @ap_dev: pointer to the AP device.
690 *
691 * Find the device type if query queue returned a device type of 0.
674 */ 692 */
675static int ap_probe_device_type(struct ap_device *ap_dev) 693static int ap_probe_device_type(struct ap_device *ap_dev)
676{ 694{
@@ -764,7 +782,11 @@ out:
764} 782}
765 783
766/** 784/**
767 * Scan the ap bus for new devices. 785 * __ap_scan_bus(): Scan the AP bus.
786 * @dev: Pointer to device
787 * @data: Pointer to data
788 *
789 * Scan the AP bus for new devices.
768 */ 790 */
769static int __ap_scan_bus(struct device *dev, void *data) 791static int __ap_scan_bus(struct device *dev, void *data)
770{ 792{
@@ -867,6 +889,8 @@ ap_config_timeout(unsigned long ptr)
867} 889}
868 890
869/** 891/**
892 * ap_schedule_poll_timer(): Schedule poll timer.
893 *
870 * Set up the timer to run the poll tasklet 894 * Set up the timer to run the poll tasklet
871 */ 895 */
872static inline void ap_schedule_poll_timer(void) 896static inline void ap_schedule_poll_timer(void)
@@ -877,10 +901,11 @@ static inline void ap_schedule_poll_timer(void)
877} 901}
878 902
879/** 903/**
880 * Receive pending reply messages from an AP device. 904 * ap_poll_read(): Receive pending reply messages from an AP device.
881 * @ap_dev: pointer to the AP device 905 * @ap_dev: pointer to the AP device
882 * @flags: pointer to control flags, bit 2^0 is set if another poll is 906 * @flags: pointer to control flags, bit 2^0 is set if another poll is
883 * required, bit 2^1 is set if the poll timer needs to get armed 907 * required, bit 2^1 is set if the poll timer needs to get armed
908 *
884 * Returns 0 if the device is still present, -ENODEV if not. 909 * Returns 0 if the device is still present, -ENODEV if not.
885 */ 910 */
886static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) 911static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
@@ -925,10 +950,11 @@ static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
925} 950}
926 951
927/** 952/**
928 * Send messages from the request queue to an AP device. 953 * ap_poll_write(): Send messages from the request queue to an AP device.
929 * @ap_dev: pointer to the AP device 954 * @ap_dev: pointer to the AP device
930 * @flags: pointer to control flags, bit 2^0 is set if another poll is 955 * @flags: pointer to control flags, bit 2^0 is set if another poll is
931 * required, bit 2^1 is set if the poll timer needs to get armed 956 * required, bit 2^1 is set if the poll timer needs to get armed
957 *
932 * Returns 0 if the device is still present, -ENODEV if not. 958 * Returns 0 if the device is still present, -ENODEV if not.
933 */ 959 */
934static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) 960static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
@@ -968,11 +994,13 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
968} 994}
969 995
970/** 996/**
971 * Poll AP device for pending replies and send new messages. If either 997 * ap_poll_queue(): Poll AP device for pending replies and send new messages.
972 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
973 * @ap_dev: pointer to the bus device 998 * @ap_dev: pointer to the bus device
974 * @flags: pointer to control flags, bit 2^0 is set if another poll is 999 * @flags: pointer to control flags, bit 2^0 is set if another poll is
975 * required, bit 2^1 is set if the poll timer needs to get armed 1000 * required, bit 2^1 is set if the poll timer needs to get armed
1001 *
1002 * Poll AP device for pending replies and send new messages. If either
1003 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
976 * Returns 0. 1004 * Returns 0.
977 */ 1005 */
978static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags) 1006static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
@@ -986,9 +1014,11 @@ static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
986} 1014}
987 1015
988/** 1016/**
989 * Queue a message to a device. 1017 * __ap_queue_message(): Queue a message to a device.
990 * @ap_dev: pointer to the AP device 1018 * @ap_dev: pointer to the AP device
991 * @ap_msg: the message to be queued 1019 * @ap_msg: the message to be queued
1020 *
1021 * Queue a message to a device. Returns 0 if successful.
992 */ 1022 */
993static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 1023static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
994{ 1024{
@@ -1055,12 +1085,14 @@ void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1055EXPORT_SYMBOL(ap_queue_message); 1085EXPORT_SYMBOL(ap_queue_message);
1056 1086
1057/** 1087/**
1088 * ap_cancel_message(): Cancel a crypto request.
1089 * @ap_dev: The AP device that has the message queued
1090 * @ap_msg: The message that is to be removed
1091 *
1058 * Cancel a crypto request. This is done by removing the request 1092 * Cancel a crypto request. This is done by removing the request
1059 * from the devive pendingq or requestq queue. Note that the 1093 * from the device pending or request queue. Note that the
1060 * request stays on the AP queue. When it finishes the message 1094 * request stays on the AP queue. When it finishes the message
1061 * reply will be discarded because the psmid can't be found. 1095 * reply will be discarded because the psmid can't be found.
1062 * @ap_dev: AP device that has the message queued
1063 * @ap_msg: the message that is to be removed
1064 */ 1096 */
1065void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 1097void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1066{ 1098{
@@ -1082,7 +1114,10 @@ void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1082EXPORT_SYMBOL(ap_cancel_message); 1114EXPORT_SYMBOL(ap_cancel_message);
1083 1115
1084/** 1116/**
1085 * AP receive polling for finished AP requests 1117 * ap_poll_timeout(): AP receive polling for finished AP requests.
1118 * @unused: Unused variable.
1119 *
1120 * Schedules the AP tasklet.
1086 */ 1121 */
1087static void ap_poll_timeout(unsigned long unused) 1122static void ap_poll_timeout(unsigned long unused)
1088{ 1123{
@@ -1090,6 +1125,9 @@ static void ap_poll_timeout(unsigned long unused)
1090} 1125}
1091 1126
1092/** 1127/**
1128 * ap_reset(): Reset a not responding AP device.
1129 * @ap_dev: Pointer to the AP device
1130 *
1093 * Reset a not responding AP device and move all requests from the 1131 * Reset a not responding AP device and move all requests from the
1094 * pending queue to the request queue. 1132 * pending queue to the request queue.
1095 */ 1133 */
@@ -1108,11 +1146,6 @@ static void ap_reset(struct ap_device *ap_dev)
1108 ap_dev->unregistered = 1; 1146 ap_dev->unregistered = 1;
1109} 1147}
1110 1148
1111/**
1112 * Poll all AP devices on the bus in a round robin fashion. Continue
1113 * polling until bit 2^0 of the control flags is not set. If bit 2^1
1114 * of the control flags has been set arm the poll timer.
1115 */
1116static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags) 1149static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags)
1117{ 1150{
1118 spin_lock(&ap_dev->lock); 1151 spin_lock(&ap_dev->lock);
@@ -1126,6 +1159,14 @@ static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags)
1126 return 0; 1159 return 0;
1127} 1160}
1128 1161
1162/**
1163 * ap_poll_all(): Poll all AP devices.
1164 * @dummy: Unused variable
1165 *
1166 * Poll all AP devices on the bus in a round robin fashion. Continue
1167 * polling until bit 2^0 of the control flags is not set. If bit 2^1
1168 * of the control flags has been set arm the poll timer.
1169 */
1129static void ap_poll_all(unsigned long dummy) 1170static void ap_poll_all(unsigned long dummy)
1130{ 1171{
1131 unsigned long flags; 1172 unsigned long flags;
@@ -1144,6 +1185,9 @@ static void ap_poll_all(unsigned long dummy)
1144} 1185}
1145 1186
1146/** 1187/**
1188 * ap_poll_thread(): Thread that polls for finished requests.
1189 * @data: Unused pointer
1190 *
1147 * AP bus poll thread. The purpose of this thread is to poll for 1191 * AP bus poll thread. The purpose of this thread is to poll for
1148 * finished requests in a loop if there is a "free" cpu - that is 1192 * finished requests in a loop if there is a "free" cpu - that is
1149 * a cpu that doesn't have anything better to do. The polling stops 1193 * a cpu that doesn't have anything better to do. The polling stops
@@ -1213,7 +1257,10 @@ static void ap_poll_thread_stop(void)
1213} 1257}
1214 1258
1215/** 1259/**
1216 * Handling of request timeouts 1260 * ap_request_timeout(): Handling of request timeouts
1261 * @data: Holds the AP device.
1262 *
1263 * Handles request timeouts.
1217 */ 1264 */
1218static void ap_request_timeout(unsigned long data) 1265static void ap_request_timeout(unsigned long data)
1219{ 1266{
@@ -1246,7 +1293,9 @@ static struct reset_call ap_reset_call = {
1246}; 1293};
1247 1294
1248/** 1295/**
1249 * The module initialization code. 1296 * ap_module_init(): The module initialization code.
1297 *
1298 * Initializes the module.
1250 */ 1299 */
1251int __init ap_module_init(void) 1300int __init ap_module_init(void)
1252{ 1301{
@@ -1288,7 +1337,7 @@ int __init ap_module_init(void)
1288 if (ap_select_domain() == 0) 1337 if (ap_select_domain() == 0)
1289 ap_scan_bus(NULL); 1338 ap_scan_bus(NULL);
1290 1339
1291 /* Setup the ap bus rescan timer. */ 1340 /* Setup the AP bus rescan timer. */
1292 init_timer(&ap_config_timer); 1341 init_timer(&ap_config_timer);
1293 ap_config_timer.function = ap_config_timeout; 1342 ap_config_timer.function = ap_config_timeout;
1294 ap_config_timer.data = 0; 1343 ap_config_timer.data = 0;
@@ -1325,7 +1374,9 @@ static int __ap_match_all(struct device *dev, void *data)
1325} 1374}
1326 1375
1327/** 1376/**
1328 * The module termination code 1377 * ap_module_exit(): The module termination code
1378 *
1379 * Terminates the module.
1329 */ 1380 */
1330void ap_module_exit(void) 1381void ap_module_exit(void)
1331{ 1382{
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 87c2d6442875..c1e1200c43fc 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -50,6 +50,15 @@ typedef unsigned int ap_qid_t;
50#define AP_QID_QUEUE(_qid) ((_qid) & 15) 50#define AP_QID_QUEUE(_qid) ((_qid) & 15)
51 51
52/** 52/**
53 * struct ap_queue_status - Holds the AP queue status.
54 * @queue_empty: Shows if queue is empty
55 * @replies_waiting: Waiting replies
56 * @queue_full: Is 1 if the queue is full
57 * @pad: A 4 bit pad
58 * @int_enabled: Shows if interrupts are enabled for the AP
59 * @response_code: Holds the 8 bit response code
60 * @pad2: A 16 bit pad
61 *
53 * The ap queue status word is returned by all three AP functions 62 * The ap queue status word is returned by all three AP functions
54 * (PQAP, NQAP and DQAP). There's a set of flags in the first 63 * (PQAP, NQAP and DQAP). There's a set of flags in the first
55 * byte, followed by a 1 byte response code. 64 * byte, followed by a 1 byte response code.
@@ -75,7 +84,7 @@ struct ap_queue_status {
75#define AP_RESPONSE_NO_FIRST_PART 0x13 84#define AP_RESPONSE_NO_FIRST_PART 0x13
76#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15 85#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
77 86
78/** 87/*
79 * Known device types 88 * Known device types
80 */ 89 */
81#define AP_DEVICE_TYPE_PCICC 3 90#define AP_DEVICE_TYPE_PCICC 3
@@ -84,7 +93,7 @@ struct ap_queue_status {
84#define AP_DEVICE_TYPE_CEX2A 6 93#define AP_DEVICE_TYPE_CEX2A 6
85#define AP_DEVICE_TYPE_CEX2C 7 94#define AP_DEVICE_TYPE_CEX2C 7
86 95
87/** 96/*
88 * AP reset flag states 97 * AP reset flag states
89 */ 98 */
90#define AP_RESET_IGNORE 0 /* request timeout will be ignored */ 99#define AP_RESET_IGNORE 0 /* request timeout will be ignored */
@@ -152,7 +161,7 @@ struct ap_message {
152 .dev_type=(dt), \ 161 .dev_type=(dt), \
153 .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE, 162 .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
154 163
155/** 164/*
156 * Note: don't use ap_send/ap_recv after using ap_queue_message 165 * Note: don't use ap_send/ap_recv after using ap_queue_message
157 * for the first time. Otherwise the ap message queue will get 166 * for the first time. Otherwise the ap message queue will get
158 * confused. 167 * confused.
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index e3625a47a596..4d36e805a234 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -36,10 +36,11 @@
36#include <linux/compat.h> 36#include <linux/compat.h>
37#include <asm/atomic.h> 37#include <asm/atomic.h>
38#include <asm/uaccess.h> 38#include <asm/uaccess.h>
39#include <linux/hw_random.h>
39 40
40#include "zcrypt_api.h" 41#include "zcrypt_api.h"
41 42
42/** 43/*
43 * Module description. 44 * Module description.
44 */ 45 */
45MODULE_AUTHOR("IBM Corporation"); 46MODULE_AUTHOR("IBM Corporation");
@@ -52,7 +53,10 @@ static LIST_HEAD(zcrypt_device_list);
52static int zcrypt_device_count = 0; 53static int zcrypt_device_count = 0;
53static atomic_t zcrypt_open_count = ATOMIC_INIT(0); 54static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
54 55
55/** 56static int zcrypt_rng_device_add(void);
57static void zcrypt_rng_device_remove(void);
58
59/*
56 * Device attributes common for all crypto devices. 60 * Device attributes common for all crypto devices.
57 */ 61 */
58static ssize_t zcrypt_type_show(struct device *dev, 62static ssize_t zcrypt_type_show(struct device *dev,
@@ -99,6 +103,9 @@ static struct attribute_group zcrypt_device_attr_group = {
99}; 103};
100 104
101/** 105/**
106 * __zcrypt_increase_preference(): Increase preference of a crypto device.
107 * @zdev: Pointer the crypto device
108 *
102 * Move the device towards the head of the device list. 109 * Move the device towards the head of the device list.
103 * Need to be called while holding the zcrypt device list lock. 110 * Need to be called while holding the zcrypt device list lock.
104 * Note: cards with speed_rating of 0 are kept at the end of the list. 111 * Note: cards with speed_rating of 0 are kept at the end of the list.
@@ -125,6 +132,9 @@ static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
125} 132}
126 133
127/** 134/**
135 * __zcrypt_decrease_preference(): Decrease preference of a crypto device.
136 * @zdev: Pointer to a crypto device.
137 *
128 * Move the device towards the tail of the device list. 138 * Move the device towards the tail of the device list.
129 * Need to be called while holding the zcrypt device list lock. 139 * Need to be called while holding the zcrypt device list lock.
130 * Note: cards with speed_rating of 0 are kept at the end of the list. 140 * Note: cards with speed_rating of 0 are kept at the end of the list.
@@ -198,7 +208,10 @@ void zcrypt_device_free(struct zcrypt_device *zdev)
198EXPORT_SYMBOL(zcrypt_device_free); 208EXPORT_SYMBOL(zcrypt_device_free);
199 209
200/** 210/**
201 * Register a crypto device. 211 * zcrypt_device_register() - Register a crypto device.
212 * @zdev: Pointer to a crypto device
213 *
214 * Register a crypto device. Returns 0 if successful.
202 */ 215 */
203int zcrypt_device_register(struct zcrypt_device *zdev) 216int zcrypt_device_register(struct zcrypt_device *zdev)
204{ 217{
@@ -216,16 +229,37 @@ int zcrypt_device_register(struct zcrypt_device *zdev)
216 __zcrypt_increase_preference(zdev); 229 __zcrypt_increase_preference(zdev);
217 zcrypt_device_count++; 230 zcrypt_device_count++;
218 spin_unlock_bh(&zcrypt_device_lock); 231 spin_unlock_bh(&zcrypt_device_lock);
232 if (zdev->ops->rng) {
233 rc = zcrypt_rng_device_add();
234 if (rc)
235 goto out_unregister;
236 }
237 return 0;
238
239out_unregister:
240 spin_lock_bh(&zcrypt_device_lock);
241 zcrypt_device_count--;
242 list_del_init(&zdev->list);
243 spin_unlock_bh(&zcrypt_device_lock);
244 sysfs_remove_group(&zdev->ap_dev->device.kobj,
245 &zcrypt_device_attr_group);
246 put_device(&zdev->ap_dev->device);
247 zcrypt_device_put(zdev);
219out: 248out:
220 return rc; 249 return rc;
221} 250}
222EXPORT_SYMBOL(zcrypt_device_register); 251EXPORT_SYMBOL(zcrypt_device_register);
223 252
224/** 253/**
254 * zcrypt_device_unregister(): Unregister a crypto device.
255 * @zdev: Pointer to crypto device
256 *
225 * Unregister a crypto device. 257 * Unregister a crypto device.
226 */ 258 */
227void zcrypt_device_unregister(struct zcrypt_device *zdev) 259void zcrypt_device_unregister(struct zcrypt_device *zdev)
228{ 260{
261 if (zdev->ops->rng)
262 zcrypt_rng_device_remove();
229 spin_lock_bh(&zcrypt_device_lock); 263 spin_lock_bh(&zcrypt_device_lock);
230 zcrypt_device_count--; 264 zcrypt_device_count--;
231 list_del_init(&zdev->list); 265 list_del_init(&zdev->list);
@@ -238,7 +272,9 @@ void zcrypt_device_unregister(struct zcrypt_device *zdev)
238EXPORT_SYMBOL(zcrypt_device_unregister); 272EXPORT_SYMBOL(zcrypt_device_unregister);
239 273
240/** 274/**
241 * zcrypt_read is not be supported beyond zcrypt 1.3.1 275 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
276 *
277 * This function is not supported beyond zcrypt 1.3.1.
242 */ 278 */
243static ssize_t zcrypt_read(struct file *filp, char __user *buf, 279static ssize_t zcrypt_read(struct file *filp, char __user *buf,
244 size_t count, loff_t *f_pos) 280 size_t count, loff_t *f_pos)
@@ -247,6 +283,8 @@ static ssize_t zcrypt_read(struct file *filp, char __user *buf,
247} 283}
248 284
249/** 285/**
286 * zcrypt_write(): Not allowed.
287 *
250 * Write is not allowed 288 * Write is not allowed
251 */ 289 */
252static ssize_t zcrypt_write(struct file *filp, const char __user *buf, 290static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
@@ -256,7 +294,9 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
256} 294}
257 295
258/** 296/**
259 * Device open/close functions to count number of users. 297 * zcrypt_open(): Count number of users.
298 *
299 * Device open function to count number of users.
260 */ 300 */
261static int zcrypt_open(struct inode *inode, struct file *filp) 301static int zcrypt_open(struct inode *inode, struct file *filp)
262{ 302{
@@ -264,13 +304,18 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
264 return 0; 304 return 0;
265} 305}
266 306
307/**
308 * zcrypt_release(): Count number of users.
309 *
310 * Device close function to count number of users.
311 */
267static int zcrypt_release(struct inode *inode, struct file *filp) 312static int zcrypt_release(struct inode *inode, struct file *filp)
268{ 313{
269 atomic_dec(&zcrypt_open_count); 314 atomic_dec(&zcrypt_open_count);
270 return 0; 315 return 0;
271} 316}
272 317
273/** 318/*
274 * zcrypt ioctls. 319 * zcrypt ioctls.
275 */ 320 */
276static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) 321static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
@@ -280,7 +325,7 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
280 325
281 if (mex->outputdatalength < mex->inputdatalength) 326 if (mex->outputdatalength < mex->inputdatalength)
282 return -EINVAL; 327 return -EINVAL;
283 /** 328 /*
284 * As long as outputdatalength is big enough, we can set the 329 * As long as outputdatalength is big enough, we can set the
285 * outputdatalength equal to the inputdatalength, since that is the 330 * outputdatalength equal to the inputdatalength, since that is the
286 * number of bytes we will copy in any case 331 * number of bytes we will copy in any case
@@ -326,7 +371,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
326 if (crt->outputdatalength < crt->inputdatalength || 371 if (crt->outputdatalength < crt->inputdatalength ||
327 (crt->inputdatalength & 1)) 372 (crt->inputdatalength & 1))
328 return -EINVAL; 373 return -EINVAL;
329 /** 374 /*
330 * As long as outputdatalength is big enough, we can set the 375 * As long as outputdatalength is big enough, we can set the
331 * outputdatalength equal to the inputdatalength, since that is the 376 * outputdatalength equal to the inputdatalength, since that is the
332 * number of bytes we will copy in any case 377 * number of bytes we will copy in any case
@@ -343,7 +388,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
343 zdev->max_mod_size < crt->inputdatalength) 388 zdev->max_mod_size < crt->inputdatalength)
344 continue; 389 continue;
345 if (zdev->short_crt && crt->inputdatalength > 240) { 390 if (zdev->short_crt && crt->inputdatalength > 240) {
346 /** 391 /*
347 * Check inputdata for leading zeros for cards 392 * Check inputdata for leading zeros for cards
348 * that can't handle np_prime, bp_key, or 393 * that can't handle np_prime, bp_key, or
349 * u_mult_inv > 128 bytes. 394 * u_mult_inv > 128 bytes.
@@ -359,7 +404,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
359 copy_from_user(&z3, crt->u_mult_inv, len)) 404 copy_from_user(&z3, crt->u_mult_inv, len))
360 return -EFAULT; 405 return -EFAULT;
361 copied = 1; 406 copied = 1;
362 /** 407 /*
363 * We have to restart device lookup - 408 * We have to restart device lookup -
364 * the device list may have changed by now. 409 * the device list may have changed by now.
365 */ 410 */
@@ -427,6 +472,37 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
427 return -ENODEV; 472 return -ENODEV;
428} 473}
429 474
475static long zcrypt_rng(char *buffer)
476{
477 struct zcrypt_device *zdev;
478 int rc;
479
480 spin_lock_bh(&zcrypt_device_lock);
481 list_for_each_entry(zdev, &zcrypt_device_list, list) {
482 if (!zdev->online || !zdev->ops->rng)
483 continue;
484 zcrypt_device_get(zdev);
485 get_device(&zdev->ap_dev->device);
486 zdev->request_count++;
487 __zcrypt_decrease_preference(zdev);
488 if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
489 spin_unlock_bh(&zcrypt_device_lock);
490 rc = zdev->ops->rng(zdev, buffer);
491 spin_lock_bh(&zcrypt_device_lock);
492 module_put(zdev->ap_dev->drv->driver.owner);
493 } else
494 rc = -EAGAIN;
495 zdev->request_count--;
496 __zcrypt_increase_preference(zdev);
497 put_device(&zdev->ap_dev->device);
498 zcrypt_device_put(zdev);
499 spin_unlock_bh(&zcrypt_device_lock);
500 return rc;
501 }
502 spin_unlock_bh(&zcrypt_device_lock);
503 return -ENODEV;
504}
505
430static void zcrypt_status_mask(char status[AP_DEVICES]) 506static void zcrypt_status_mask(char status[AP_DEVICES])
431{ 507{
432 struct zcrypt_device *zdev; 508 struct zcrypt_device *zdev;
@@ -514,6 +590,8 @@ static int zcrypt_count_type(int type)
514} 590}
515 591
516/** 592/**
593 * zcrypt_ica_status(): Old, deprecated combi status call.
594 *
517 * Old, deprecated combi status call. 595 * Old, deprecated combi status call.
518 */ 596 */
519static long zcrypt_ica_status(struct file *filp, unsigned long arg) 597static long zcrypt_ica_status(struct file *filp, unsigned long arg)
@@ -615,7 +693,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
615 (int __user *) arg); 693 (int __user *) arg);
616 case Z90STAT_DOMAIN_INDEX: 694 case Z90STAT_DOMAIN_INDEX:
617 return put_user(ap_domain_index, (int __user *) arg); 695 return put_user(ap_domain_index, (int __user *) arg);
618 /** 696 /*
619 * Deprecated ioctls. Don't add another device count ioctl, 697 * Deprecated ioctls. Don't add another device count ioctl,
620 * you can count them yourself in the user space with the 698 * you can count them yourself in the user space with the
621 * output of the Z90STAT_STATUS_MASK ioctl. 699 * output of the Z90STAT_STATUS_MASK ioctl.
@@ -653,7 +731,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
653} 731}
654 732
655#ifdef CONFIG_COMPAT 733#ifdef CONFIG_COMPAT
656/** 734/*
657 * ioctl32 conversion routines 735 * ioctl32 conversion routines
658 */ 736 */
659struct compat_ica_rsa_modexpo { 737struct compat_ica_rsa_modexpo {
@@ -804,7 +882,7 @@ static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
804} 882}
805#endif 883#endif
806 884
807/** 885/*
808 * Misc device file operations. 886 * Misc device file operations.
809 */ 887 */
810static const struct file_operations zcrypt_fops = { 888static const struct file_operations zcrypt_fops = {
@@ -819,7 +897,7 @@ static const struct file_operations zcrypt_fops = {
819 .release = zcrypt_release 897 .release = zcrypt_release
820}; 898};
821 899
822/** 900/*
823 * Misc device. 901 * Misc device.
824 */ 902 */
825static struct miscdevice zcrypt_misc_device = { 903static struct miscdevice zcrypt_misc_device = {
@@ -828,7 +906,7 @@ static struct miscdevice zcrypt_misc_device = {
828 .fops = &zcrypt_fops, 906 .fops = &zcrypt_fops,
829}; 907};
830 908
831/** 909/*
832 * Deprecated /proc entry support. 910 * Deprecated /proc entry support.
833 */ 911 */
834static struct proc_dir_entry *zcrypt_entry; 912static struct proc_dir_entry *zcrypt_entry;
@@ -1022,7 +1100,7 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
1022 } 1100 }
1023 1101
1024 for (j = 0; j < 64 && *ptr; ptr++) { 1102 for (j = 0; j < 64 && *ptr; ptr++) {
1025 /** 1103 /*
1026 * '0' for no device, '1' for PCICA, '2' for PCICC, 1104 * '0' for no device, '1' for PCICA, '2' for PCICC,
1027 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3, 1105 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
1028 * '5' for CEX2C and '6' for CEX2A' 1106 * '5' for CEX2C and '6' for CEX2A'
@@ -1041,7 +1119,76 @@ out:
1041 return count; 1119 return count;
1042} 1120}
1043 1121
1122static int zcrypt_rng_device_count;
1123static u32 *zcrypt_rng_buffer;
1124static int zcrypt_rng_buffer_index;
1125static DEFINE_MUTEX(zcrypt_rng_mutex);
1126
1127static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
1128{
1129 int rc;
1130
1131 /*
1132 * We don't need locking here because the RNG API guarantees serialized
1133 * read method calls.
1134 */
1135 if (zcrypt_rng_buffer_index == 0) {
1136 rc = zcrypt_rng((char *) zcrypt_rng_buffer);
1137 if (rc < 0)
1138 return -EIO;
1139 zcrypt_rng_buffer_index = rc / sizeof *data;
1140 }
1141 *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
1142 return sizeof *data;
1143}
1144
1145static struct hwrng zcrypt_rng_dev = {
1146 .name = "zcrypt",
1147 .data_read = zcrypt_rng_data_read,
1148};
1149
1150static int zcrypt_rng_device_add(void)
1151{
1152 int rc = 0;
1153
1154 mutex_lock(&zcrypt_rng_mutex);
1155 if (zcrypt_rng_device_count == 0) {
1156 zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
1157 if (!zcrypt_rng_buffer) {
1158 rc = -ENOMEM;
1159 goto out;
1160 }
1161 zcrypt_rng_buffer_index = 0;
1162 rc = hwrng_register(&zcrypt_rng_dev);
1163 if (rc)
1164 goto out_free;
1165 zcrypt_rng_device_count = 1;
1166 } else
1167 zcrypt_rng_device_count++;
1168 mutex_unlock(&zcrypt_rng_mutex);
1169 return 0;
1170
1171out_free:
1172 free_page((unsigned long) zcrypt_rng_buffer);
1173out:
1174 mutex_unlock(&zcrypt_rng_mutex);
1175 return rc;
1176}
1177
1178static void zcrypt_rng_device_remove(void)
1179{
1180 mutex_lock(&zcrypt_rng_mutex);
1181 zcrypt_rng_device_count--;
1182 if (zcrypt_rng_device_count == 0) {
1183 hwrng_unregister(&zcrypt_rng_dev);
1184 free_page((unsigned long) zcrypt_rng_buffer);
1185 }
1186 mutex_unlock(&zcrypt_rng_mutex);
1187}
1188
1044/** 1189/**
1190 * zcrypt_api_init(): Module initialization.
1191 *
1045 * The module initialization code. 1192 * The module initialization code.
1046 */ 1193 */
1047int __init zcrypt_api_init(void) 1194int __init zcrypt_api_init(void)
@@ -1076,6 +1223,8 @@ out:
1076} 1223}
1077 1224
1078/** 1225/**
1226 * zcrypt_api_exit(): Module termination.
1227 *
1079 * The module termination code. 1228 * The module termination code.
1080 */ 1229 */
1081void zcrypt_api_exit(void) 1230void zcrypt_api_exit(void)
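zcrypt_api.c gains a hardware RNG backend: the first registered crypto device with an rng op registers a struct hwrng provider, further such devices only bump zcrypt_rng_device_count, and the last unregistration tears the provider down again. The data_read method may run without locking because the hw_random core serializes calls into it. The provider side of that API, reduced to a sketch with invented names and a fake entropy source:

#include <linux/hw_random.h>

static u32 demo_word;

/* Called serially by the hw_random core; no locking needed here. */
static int demo_data_read(struct hwrng *rng, u32 *data)
{
	*data = demo_word++;		/* stand-in for real entropy */
	return sizeof(*data);		/* number of bytes delivered */
}

static struct hwrng demo_rng = {
	.name	   = "demo",
	.data_read = demo_data_read,
};

/* hwrng_register(&demo_rng) exposes the device via the rng core;
 * hwrng_unregister(&demo_rng) removes it again. */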
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index de4877ee618f..5c6e222b2ac4 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -43,17 +43,17 @@
43#define DEV_NAME "zcrypt" 43#define DEV_NAME "zcrypt"
44 44
45#define PRINTK(fmt, args...) \ 45#define PRINTK(fmt, args...) \
46 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args) 46 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
47#define PRINTKN(fmt, args...) \ 47#define PRINTKN(fmt, args...) \
48 printk(KERN_DEBUG DEV_NAME ": " fmt, ## args) 48 printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
49#define PRINTKW(fmt, args...) \ 49#define PRINTKW(fmt, args...) \
50 printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args) 50 printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __func__ , ## args)
51#define PRINTKC(fmt, args...) \ 51#define PRINTKC(fmt, args...) \
52 printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args) 52 printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __func__ , ## args)
53 53
54#ifdef ZCRYPT_DEBUG 54#ifdef ZCRYPT_DEBUG
55#define PDEBUG(fmt, args...) \ 55#define PDEBUG(fmt, args...) \
56 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args) 56 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
57#else 57#else
58#define PDEBUG(fmt, args...) do {} while (0) 58#define PDEBUG(fmt, args...) do {} while (0)
59#endif 59#endif
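The macro changes above swap the GCC-specific __FUNCTION__ for __func__, the predefined identifier that C99 guarantees inside every function body; under gcc the two expand to the same string, so the printk output is unchanged. A standalone illustration:

#include <stdio.h>

/* __func__ is a C99 predefined identifier, not a preprocessor macro:
 * within any function it evaluates to that function's name. */
static void report(void)
{
	printf("%s: entered\n", __func__);	/* prints "report: entered" */
}

int main(void)
{
	report();
	return 0;
}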
@@ -100,6 +100,13 @@ struct ica_z90_status {
100#define ZCRYPT_CEX2C 5 100#define ZCRYPT_CEX2C 5
101#define ZCRYPT_CEX2A 6 101#define ZCRYPT_CEX2A 6
102 102
103/**
104 * Large random numbers are pulled in 4096 byte chunks from the crypto cards
105 * and stored in a page. Be careful when increasing this buffer due to size
106 * limitations for AP requests.
107 */
108#define ZCRYPT_RNG_BUFFER_SIZE 4096
109
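Because the buffer is drained as u32 words and refilled from a single zeroed page, the size has to stay word-aligned and must not exceed PAGE_SIZE. An illustrative compile-time guard, not part of the patch:

#include <linux/kernel.h>	/* BUILD_BUG_ON */

/* Illustrative only: catch a bad resize of the RNG buffer at build time. */
static inline void zcrypt_rng_buffer_size_check(void)
{
	BUILD_BUG_ON(ZCRYPT_RNG_BUFFER_SIZE > PAGE_SIZE);
	BUILD_BUG_ON(ZCRYPT_RNG_BUFFER_SIZE % sizeof(u32) != 0);
}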
103struct zcrypt_device; 110struct zcrypt_device;
104 111
105struct zcrypt_ops { 112struct zcrypt_ops {
@@ -107,6 +114,7 @@ struct zcrypt_ops {
107 long (*rsa_modexpo_crt)(struct zcrypt_device *, 114 long (*rsa_modexpo_crt)(struct zcrypt_device *,
108 struct ica_rsa_modexpo_crt *); 115 struct ica_rsa_modexpo_crt *);
109 long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *); 116 long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
117 long (*rng)(struct zcrypt_device *, char *);
110}; 118};
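The new optional rng hook is how the request distributor tells RNG-capable cards apart: device types that cannot produce large random numbers simply leave .rng NULL in their zcrypt_ops. A sketch of the dispatch; zcrypt_rng_fetch() is a hypothetical name, the real distributor lives in zcrypt_api.c:

static long zcrypt_rng_fetch(struct zcrypt_device *zdev, char *page)
{
	if (!zdev->ops->rng)
		return -ENODEV;		/* card cannot do large random numbers */
	return zdev->ops->rng(zdev, page);	/* bytes filled or -errno */
}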
111 119
112struct zcrypt_device { 120struct zcrypt_device {
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
index 8dbcf0eef3e5..ed82f2f59b17 100644
--- a/drivers/s390/crypto/zcrypt_cca_key.h
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -174,7 +174,7 @@ static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex,
174 key->pvtMeHdr = static_pvt_me_hdr; 174 key->pvtMeHdr = static_pvt_me_hdr;
175 key->pvtMeSec = static_pvt_me_sec; 175 key->pvtMeSec = static_pvt_me_sec;
176 key->pubMeSec = static_pub_me_sec; 176 key->pubMeSec = static_pub_me_sec;
177 /** 177 /*
178 * In a private key, the modulus doesn't appear in the public 178 * In a private key, the modulus doesn't appear in the public
179 * section. So, an arbitrary public exponent of 0x010001 will be 179 * section. So, an arbitrary public exponent of 0x010001 will be
180 * used. 180 * used.
@@ -338,7 +338,7 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt,
338 pub = (struct cca_public_sec *)(key->key_parts + key_len); 338 pub = (struct cca_public_sec *)(key->key_parts + key_len);
339 *pub = static_cca_pub_sec; 339 *pub = static_cca_pub_sec;
340 pub->modulus_bit_len = 8 * crt->inputdatalength; 340 pub->modulus_bit_len = 8 * crt->inputdatalength;
341 /** 341 /*
342 * In a private key, the modulus doesn't appear in the public 342 * In a private key, the modulus doesn't appear in the public
343 * section. So, an arbitrary public exponent of 0x010001 will be 343 * section. So, an arbitrary public exponent of 0x010001 will be
344 * used. 344 * used.
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 2cb616ba8bec..3e27fe77d207 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -108,7 +108,7 @@ static inline int convert_error(struct zcrypt_device *zdev,
108 return -EINVAL; 108 return -EINVAL;
109 case REP82_ERROR_MESSAGE_TYPE: 109 case REP82_ERROR_MESSAGE_TYPE:
110 // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A 110 // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
111 /** 111 /*
112 * To send a message of the wrong type is a bug in the 112 * To send a message of the wrong type is a bug in the
113 * device driver. Warn about it, disable the device 113 * device driver. Warn about it, disable the device
114 * and then repeat the request. 114 * and then repeat the request.
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index d6d59bf9ac38..17ea56ce1c11 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -42,7 +42,7 @@
42#define PCICC_MAX_MOD_SIZE_OLD 128 /* 1024 bits */ 42#define PCICC_MAX_MOD_SIZE_OLD 128 /* 1024 bits */
43#define PCICC_MAX_MOD_SIZE 256 /* 2048 bits */ 43#define PCICC_MAX_MOD_SIZE 256 /* 2048 bits */
44 44
45/** 45/*
46 * PCICC cards need a speed rating of 0. This keeps them at the end of 46 * PCICC cards need a speed rating of 0. This keeps them at the end of
47 * the zcrypt device list (see zcrypt_api.c). PCICC cards are only 47 * the zcrypt device list (see zcrypt_api.c). PCICC cards are only
48 * used if no other cards are present because they are slow and can only 48 * used if no other cards are present because they are slow and can only
@@ -388,7 +388,7 @@ static int convert_type86(struct zcrypt_device *zdev,
388 reply_len = le16_to_cpu(msg->length) - 2; 388 reply_len = le16_to_cpu(msg->length) - 2;
389 if (reply_len > outputdatalength) 389 if (reply_len > outputdatalength)
390 return -EINVAL; 390 return -EINVAL;
391 /** 391 /*
392 * For all encipher requests, the length of the ciphertext (reply_len) 392 * For all encipher requests, the length of the ciphertext (reply_len)
393 * will always equal the modulus length. For MEX decipher requests 393 * will always equal the modulus length. For MEX decipher requests
394 * the output needs to get padded. Minimum pad size is 10. 394 * the output needs to get padded. Minimum pad size is 10.
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 70b9ddc8cf9d..0bc9b3188e64 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -356,6 +356,55 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
356} 356}
357 357
358/** 358/**
359 * Prepare a type6 CPRB message for random number generation
360 *
361 * @ap_dev: AP device pointer
362 * @ap_msg: pointer to AP message
363 */
364static void rng_type6CPRB_msgX(struct ap_device *ap_dev,
365 struct ap_message *ap_msg,
366 unsigned random_number_length)
367{
368 struct {
369 struct type6_hdr hdr;
370 struct CPRBX cprbx;
371 char function_code[2];
372 short int rule_length;
373 char rule[8];
374 short int verb_length;
375 short int key_length;
376 } __attribute__((packed)) *msg = ap_msg->message;
377 static struct type6_hdr static_type6_hdrX = {
378 .type = 0x06,
379 .offset1 = 0x00000058,
380 .agent_id = {'C', 'A'},
381 .function_code = {'R', 'L'},
382 .ToCardLen1 = sizeof *msg - sizeof(msg->hdr),
383 .FromCardLen1 = sizeof *msg - sizeof(msg->hdr),
384 };
385 static struct CPRBX static_cprbx = {
386 .cprb_len = 0x00dc,
387 .cprb_ver_id = 0x02,
388 .func_id = {0x54, 0x32},
389 .req_parml = sizeof *msg - sizeof(msg->hdr) -
390 sizeof(msg->cprbx),
391 .rpl_msgbl = sizeof *msg - sizeof(msg->hdr),
392 };
393
394 msg->hdr = static_type6_hdrX;
395 msg->hdr.FromCardLen2 = random_number_length;
396 msg->cprbx = static_cprbx;
397 msg->cprbx.rpl_datal = random_number_length;
398 msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
399 memcpy(msg->function_code, msg->hdr.function_code, 0x02);
400 msg->rule_length = 0x0a;
401 memcpy(msg->rule, "RANDOM  ", 8);
402 msg->verb_length = 0x02;
403 msg->key_length = 0x02;
404 ap_msg->length = sizeof *msg;
405}
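For reference, the request that rng_type6CPRB_msgX() assembles, with the field values taken from the code above (layout is illustrative, not quoted from architecture documentation):

/*
 *   type6_hdr        .type = 0x06, .function_code = "RL",
 *                    .FromCardLen2 = requested number of random bytes
 *   CPRBX            .cprb_len = 0xdc, .func_id = {0x54, 0x32},
 *                    .domain = the queue's domain index
 *   function_code[2] copy of "RL"
 *   rule_length 0x0a, rule "RANDOM  " (8 characters, blank padded)
 *   verb_length 0x02, key_length 0x02
 */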
406
407/**
359 * Copy results from a type 86 ICA reply message back to user space. 408 * Copy results from a type 86 ICA reply message back to user space.
360 * 409 *
361 * @zdev: crypto device pointer 410 * @zdev: crypto device pointer
@@ -452,7 +501,7 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
452 reply_len = msg->length - 2; 501 reply_len = msg->length - 2;
453 if (reply_len > outputdatalength) 502 if (reply_len > outputdatalength)
454 return -EINVAL; 503 return -EINVAL;
455 /** 504 /*
456 * For all encipher requests, the length of the ciphertext (reply_len) 505 * For all encipher requests, the length of the ciphertext (reply_len)
457 * will always equal the modulus length. For MEX decipher requests 506 * will always equal the modulus length. For MEX decipher requests
458 * the output needs to get padded. Minimum pad size is 10. 507 * the output needs to get padded. Minimum pad size is 10.
@@ -509,6 +558,26 @@ static int convert_type86_xcrb(struct zcrypt_device *zdev,
509 return 0; 558 return 0;
510} 559}
511 560
561static int convert_type86_rng(struct zcrypt_device *zdev,
562 struct ap_message *reply,
563 char *buffer)
564{
565 struct {
566 struct type86_hdr hdr;
567 struct type86_fmt2_ext fmt2;
568 struct CPRBX cprbx;
569 } __attribute__((packed)) *msg = reply->message;
570 char *data = reply->message;
571
572 if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) {
573 PDEBUG("RNG response error on PCIXCC/CEX2C rc=%hu/rs=%hu\n",
574 msg->cprbx.ccp_rtcode, msg->cprbx.ccp_rscode);
575 return -EINVAL;
576 }
577 memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
578 return msg->fmt2.count2;
579}
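convert_type86_rng() locates the payload through the offset2/count2 pair in the type 86 fmt2 header and copies it out verbatim. A hedged helper showing the same framing with an explicit bounds check, which the patch itself omits; copy_rng_payload() and reply_size are hypothetical:

#include <linux/string.h>

/* Illustrative bounds-checked variant, not part of the patch. */
static int copy_rng_payload(const char *reply, size_t reply_size,
			    unsigned int offset, unsigned int count,
			    char *out)
{
	if (offset > reply_size || count > reply_size - offset)
		return -EINVAL;		/* malformed offset/count framing */
	memcpy(out, reply + offset, count);
	return count;
}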
580
512static int convert_response_ica(struct zcrypt_device *zdev, 581static int convert_response_ica(struct zcrypt_device *zdev,
513 struct ap_message *reply, 582 struct ap_message *reply,
514 char __user *outputdata, 583 char __user *outputdata,
@@ -567,6 +636,31 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
567 } 636 }
568} 637}
569 638
639static int convert_response_rng(struct zcrypt_device *zdev,
640 struct ap_message *reply,
641 char *data)
642{
643 struct type86x_reply *msg = reply->message;
644
645 switch (msg->hdr.type) {
646 case TYPE82_RSP_CODE:
647 case TYPE88_RSP_CODE:
648 return -EINVAL;
649 case TYPE86_RSP_CODE:
650 if (msg->hdr.reply_code)
651 return -EINVAL;
652 if (msg->cprbx.cprb_ver_id == 0x02)
653 return convert_type86_rng(zdev, reply, data);
654 /* no break, incorrect cprb version is an unknown response */
655 default: /* Unknown response type, this should NEVER EVER happen */
656 PRINTK("Unrecognized Message Header: %08x%08x\n",
657 *(unsigned int *) reply->message,
658 *(unsigned int *) (reply->message+4));
659 zdev->online = 0;
660 return -EAGAIN; /* repeat the request on a different device. */
661 }
662}
663
570/** 664/**
571 * This function is called from the AP bus code after a crypto request 665 * This function is called from the AP bus code after a crypto request
572 * "msg" has finished with the reply message "reply". 666 * "msg" has finished with the reply message "reply".
@@ -736,6 +830,42 @@ out_free:
736} 830}
737 831
738/** 832/**
833 * The request distributor calls this function if it picked the PCIXCC/CEX2C
834 * device to generate random data.
835 * @zdev: pointer to zcrypt_device structure that identifies the
836 * PCIXCC/CEX2C device to the request distributor
837 * @buffer: pointer to a memory page to return random data
838 */
839
840static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev,
841 char *buffer)
842{
843 struct ap_message ap_msg;
844 struct response_type resp_type = {
845 .type = PCIXCC_RESPONSE_TYPE_XCRB,
846 };
847 int rc;
848
849 ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
850 if (!ap_msg.message)
851 return -ENOMEM;
852 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
853 atomic_inc_return(&zcrypt_step);
854 ap_msg.private = &resp_type;
855 rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
856 init_completion(&resp_type.work);
857 ap_queue_message(zdev->ap_dev, &ap_msg);
858 rc = wait_for_completion_interruptible(&resp_type.work);
859 if (rc == 0)
860 rc = convert_response_rng(zdev, &ap_msg, buffer);
861 else
862 /* Signal pending. */
863 ap_cancel_message(zdev->ap_dev, &ap_msg);
864 kfree(ap_msg.message);
865 return rc;
866}
867
868/**
739 * The crypto operations for a PCIXCC/CEX2C card. 869 * The crypto operations for a PCIXCC/CEX2C card.
740 */ 870 */
741static struct zcrypt_ops zcrypt_pcixcc_ops = { 871static struct zcrypt_ops zcrypt_pcixcc_ops = {
@@ -744,6 +874,13 @@ static struct zcrypt_ops zcrypt_pcixcc_ops = {
744 .send_cprb = zcrypt_pcixcc_send_cprb, 874 .send_cprb = zcrypt_pcixcc_send_cprb,
745}; 875};
746 876
877static struct zcrypt_ops zcrypt_pcixcc_with_rng_ops = {
878 .rsa_modexpo = zcrypt_pcixcc_modexpo,
879 .rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
880 .send_cprb = zcrypt_pcixcc_send_cprb,
881 .rng = zcrypt_pcixcc_rng,
882};
883
747/** 884/**
748 * Micro-code detection function. It sends a message to a pcixcc card 885
749 * to find out the microcode level. 886 * to find out the microcode level.
@@ -859,6 +996,58 @@ out_free:
859} 996}
860 997
861/** 998/**
999 * Large random number detection function. It sends a message to a pcixcc
1000 * card to find out if large random numbers are supported.
1001 * @ap_dev: pointer to the AP device.
1002 *
1003 * Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
1004 */
1005static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
1006{
1007 struct ap_message ap_msg;
1008 unsigned long long psmid;
1009 struct {
1010 struct type86_hdr hdr;
1011 struct type86_fmt2_ext fmt2;
1012 struct CPRBX cprbx;
1013 } __attribute__((packed)) *reply;
1014 int rc, i;
1015
1016 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
1017 if (!ap_msg.message)
1018 return -ENOMEM;
1019
1020 rng_type6CPRB_msgX(ap_dev, &ap_msg, 4);
1021 rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message,
1022 ap_msg.length);
1023 if (rc)
1024 goto out_free;
1025
1026 /* Wait for the test message to complete. */
1027 for (i = 0; i < 2 * HZ; i++) {
1028 msleep(1000 / HZ);
1029 rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096);
1030 if (rc == 0 && psmid == 0x0102030405060708ULL)
1031 break;
1032 }
1033
1034 if (i >= 2 * HZ) {
1035 /* Got no answer. */
1036 rc = -ENODEV;
1037 goto out_free;
1038 }
1039
1040 reply = ap_msg.message;
1041 if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
1042 rc = 1;
1043 else
1044 rc = 0;
1045out_free:
1046 free_page((unsigned long) ap_msg.message);
1047 return rc;
1048}
1049
1050/**
862 * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device 1051 * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device
863 * since the bus_match already checked the hardware type. The PCIXCC 1052 * since the bus_match already checked the hardware type. The PCIXCC
864 * cards come in two flavours: micro code level 2 and micro code level 3. 1053 * cards come in two flavours: micro code level 2 and micro code level 3.
@@ -874,7 +1063,6 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
874 if (!zdev) 1063 if (!zdev)
875 return -ENOMEM; 1064 return -ENOMEM;
876 zdev->ap_dev = ap_dev; 1065 zdev->ap_dev = ap_dev;
877 zdev->ops = &zcrypt_pcixcc_ops;
878 zdev->online = 1; 1066 zdev->online = 1;
879 if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) { 1067 if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) {
880 rc = zcrypt_pcixcc_mcl(ap_dev); 1068 rc = zcrypt_pcixcc_mcl(ap_dev);
@@ -901,6 +1089,15 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
901 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; 1089 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
902 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; 1090 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
903 } 1091 }
1092 rc = zcrypt_pcixcc_rng_supported(ap_dev);
1093 if (rc < 0) {
1094 zcrypt_device_free(zdev);
1095 return rc;
1096 }
1097 if (rc)
1098 zdev->ops = &zcrypt_pcixcc_with_rng_ops;
1099 else
1100 zdev->ops = &zcrypt_pcixcc_ops;
904 ap_dev->reply = &zdev->reply; 1101 ap_dev->reply = &zdev->reply;
905 ap_dev->private = zdev; 1102 ap_dev->private = zdev;
906 rc = zcrypt_device_register(zdev); 1103 rc = zcrypt_device_register(zdev);
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index d8a5c229c5a7..04a1d7bf678c 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -299,7 +299,7 @@ claw_probe(struct ccwgroup_device *cgdev)
299 struct claw_privbk *privptr=NULL; 299 struct claw_privbk *privptr=NULL;
300 300
301#ifdef FUNCTRACE 301#ifdef FUNCTRACE
302 printk(KERN_INFO "%s Enter\n",__FUNCTION__); 302 printk(KERN_INFO "%s Enter\n",__func__);
303#endif 303#endif
304 CLAW_DBF_TEXT(2,setup,"probe"); 304 CLAW_DBF_TEXT(2,setup,"probe");
305 if (!get_device(&cgdev->dev)) 305 if (!get_device(&cgdev->dev))
@@ -313,7 +313,7 @@ claw_probe(struct ccwgroup_device *cgdev)
313 probe_error(cgdev); 313 probe_error(cgdev);
314 put_device(&cgdev->dev); 314 put_device(&cgdev->dev);
315 printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n", 315 printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n",
316 cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__); 316 cgdev->cdev[0]->dev.bus_id,__func__,__LINE__);
317 CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM); 317 CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
318 return -ENOMEM; 318 return -ENOMEM;
319 } 319 }
@@ -323,7 +323,7 @@ claw_probe(struct ccwgroup_device *cgdev)
323 probe_error(cgdev); 323 probe_error(cgdev);
324 put_device(&cgdev->dev); 324 put_device(&cgdev->dev);
325 printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n", 325 printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n",
326 cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__); 326 cgdev->cdev[0]->dev.bus_id,__func__,__LINE__);
327 CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM); 327 CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
328 return -ENOMEM; 328 return -ENOMEM;
329 } 329 }
@@ -340,7 +340,7 @@ claw_probe(struct ccwgroup_device *cgdev)
340 probe_error(cgdev); 340 probe_error(cgdev);
341 put_device(&cgdev->dev); 341 put_device(&cgdev->dev);
342 printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n", 342 printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n",
343 cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__); 343 cgdev->cdev[0]->dev.bus_id,__func__,__LINE__);
344 CLAW_DBF_TEXT_(2,setup,"probex%d",rc); 344 CLAW_DBF_TEXT_(2,setup,"probex%d",rc);
345 return rc; 345 return rc;
346 } 346 }
@@ -351,7 +351,7 @@ claw_probe(struct ccwgroup_device *cgdev)
351 cgdev->dev.driver_data = privptr; 351 cgdev->dev.driver_data = privptr;
352#ifdef FUNCTRACE 352#ifdef FUNCTRACE
353 printk(KERN_INFO "claw:%s exit on line %d, " 353 printk(KERN_INFO "claw:%s exit on line %d, "
354 "rc = 0\n",__FUNCTION__,__LINE__); 354 "rc = 0\n",__func__,__LINE__);
355#endif 355#endif
356 CLAW_DBF_TEXT(2,setup,"prbext 0"); 356 CLAW_DBF_TEXT(2,setup,"prbext 0");
357 357
@@ -371,7 +371,7 @@ claw_tx(struct sk_buff *skb, struct net_device *dev)
371 struct chbk *p_ch; 371 struct chbk *p_ch;
372 372
373#ifdef FUNCTRACE 373#ifdef FUNCTRACE
374 printk(KERN_INFO "%s:%s enter\n",dev->name,__FUNCTION__); 374 printk(KERN_INFO "%s:%s enter\n",dev->name,__func__);
375#endif 375#endif
376 CLAW_DBF_TEXT(4,trace,"claw_tx"); 376 CLAW_DBF_TEXT(4,trace,"claw_tx");
377 p_ch=&privptr->channel[WRITE]; 377 p_ch=&privptr->channel[WRITE];
@@ -381,7 +381,7 @@ claw_tx(struct sk_buff *skb, struct net_device *dev)
381 privptr->stats.tx_dropped++; 381 privptr->stats.tx_dropped++;
382#ifdef FUNCTRACE 382#ifdef FUNCTRACE
383 printk(KERN_INFO "%s: %s() exit on line %d, rc = EIO\n", 383 printk(KERN_INFO "%s: %s() exit on line %d, rc = EIO\n",
384 dev->name,__FUNCTION__, __LINE__); 384 dev->name,__func__, __LINE__);
385#endif 385#endif
386 CLAW_DBF_TEXT_(2,trace,"clawtx%d",-EIO); 386 CLAW_DBF_TEXT_(2,trace,"clawtx%d",-EIO);
387 return -EIO; 387 return -EIO;
@@ -398,7 +398,7 @@ claw_tx(struct sk_buff *skb, struct net_device *dev)
398 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); 398 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
399#ifdef FUNCTRACE 399#ifdef FUNCTRACE
400 printk(KERN_INFO "%s:%s exit on line %d, rc = %d\n", 400 printk(KERN_INFO "%s:%s exit on line %d, rc = %d\n",
401 dev->name, __FUNCTION__, __LINE__, rc); 401 dev->name, __func__, __LINE__, rc);
402#endif 402#endif
403 CLAW_DBF_TEXT_(4,trace,"clawtx%d",rc); 403 CLAW_DBF_TEXT_(4,trace,"clawtx%d",rc);
404 return rc; 404 return rc;
@@ -460,7 +460,7 @@ claw_pack_skb(struct claw_privbk *privptr)
460#ifdef IOTRACE 460#ifdef IOTRACE
461 printk(KERN_INFO "%s: %s() Packed %d len %d\n", 461 printk(KERN_INFO "%s: %s() Packed %d len %d\n",
462 p_env->ndev->name, 462 p_env->ndev->name,
463 __FUNCTION__,pkt_cnt,new_skb->len); 463 __func__,pkt_cnt,new_skb->len);
464#endif 464#endif
465 } 465 }
466 CLAW_DBF_TEXT(4,trace,"PackSKBx"); 466 CLAW_DBF_TEXT(4,trace,"PackSKBx");
@@ -478,7 +478,7 @@ claw_change_mtu(struct net_device *dev, int new_mtu)
478 struct claw_privbk *privptr=dev->priv; 478 struct claw_privbk *privptr=dev->priv;
479 int buff_size; 479 int buff_size;
480#ifdef FUNCTRACE 480#ifdef FUNCTRACE
481 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); 481 printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__);
482#endif 482#endif
483#ifdef DEBUGMSG 483#ifdef DEBUGMSG
484 printk(KERN_INFO "variable dev =\n"); 484 printk(KERN_INFO "variable dev =\n");
@@ -491,14 +491,14 @@ claw_change_mtu(struct net_device *dev, int new_mtu)
491#ifdef FUNCTRACE 491#ifdef FUNCTRACE
492 printk(KERN_INFO "%s:%s Exit on line %d, rc=EINVAL\n", 492 printk(KERN_INFO "%s:%s Exit on line %d, rc=EINVAL\n",
493 dev->name, 493 dev->name,
494 __FUNCTION__, __LINE__); 494 __func__, __LINE__);
495#endif 495#endif
496 return -EINVAL; 496 return -EINVAL;
497 } 497 }
498 dev->mtu = new_mtu; 498 dev->mtu = new_mtu;
499#ifdef FUNCTRACE 499#ifdef FUNCTRACE
500 printk(KERN_INFO "%s:%s Exit on line %d\n",dev->name, 500 printk(KERN_INFO "%s:%s Exit on line %d\n",dev->name,
501 __FUNCTION__, __LINE__); 501 __func__, __LINE__);
502#endif 502#endif
503 return 0; 503 return 0;
504} /* end of claw_change_mtu */ 504} /* end of claw_change_mtu */
@@ -522,7 +522,7 @@ claw_open(struct net_device *dev)
522 struct ccwbk *p_buf; 522 struct ccwbk *p_buf;
523 523
524#ifdef FUNCTRACE 524#ifdef FUNCTRACE
525 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); 525 printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__);
526#endif 526#endif
527 CLAW_DBF_TEXT(4,trace,"open"); 527 CLAW_DBF_TEXT(4,trace,"open");
528 if (!dev || (dev->name[0] == 0x00)) { 528 if (!dev || (dev->name[0] == 0x00)) {
@@ -537,7 +537,7 @@ claw_open(struct net_device *dev)
537 if (rc) { 537 if (rc) {
538 printk(KERN_INFO "%s:%s Exit on line %d, rc=ENOMEM\n", 538 printk(KERN_INFO "%s:%s Exit on line %d, rc=ENOMEM\n",
539 dev->name, 539 dev->name,
540 __FUNCTION__, __LINE__); 540 __func__, __LINE__);
541 CLAW_DBF_TEXT(2,trace,"openmem"); 541 CLAW_DBF_TEXT(2,trace,"openmem");
542 return -ENOMEM; 542 return -ENOMEM;
543 } 543 }
@@ -661,7 +661,7 @@ claw_open(struct net_device *dev)
661 claw_clear_busy(dev); 661 claw_clear_busy(dev);
662#ifdef FUNCTRACE 662#ifdef FUNCTRACE
663 printk(KERN_INFO "%s:%s Exit on line %d, rc=EIO\n", 663 printk(KERN_INFO "%s:%s Exit on line %d, rc=EIO\n",
664 dev->name,__FUNCTION__,__LINE__); 664 dev->name,__func__,__LINE__);
665#endif 665#endif
666 CLAW_DBF_TEXT(2,trace,"open EIO"); 666 CLAW_DBF_TEXT(2,trace,"open EIO");
667 return -EIO; 667 return -EIO;
@@ -673,7 +673,7 @@ claw_open(struct net_device *dev)
673 673
674#ifdef FUNCTRACE 674#ifdef FUNCTRACE
675 printk(KERN_INFO "%s:%s Exit on line %d, rc=0\n", 675 printk(KERN_INFO "%s:%s Exit on line %d, rc=0\n",
676 dev->name,__FUNCTION__,__LINE__); 676 dev->name,__func__,__LINE__);
677#endif 677#endif
678 CLAW_DBF_TEXT(4,trace,"openok"); 678 CLAW_DBF_TEXT(4,trace,"openok");
679 return 0; 679 return 0;
@@ -696,7 +696,7 @@ claw_irq_handler(struct ccw_device *cdev,
696 696
697 697
698#ifdef FUNCTRACE 698#ifdef FUNCTRACE
699 printk(KERN_INFO "%s enter \n",__FUNCTION__); 699 printk(KERN_INFO "%s enter \n",__func__);
700#endif 700#endif
701 CLAW_DBF_TEXT(4,trace,"clawirq"); 701 CLAW_DBF_TEXT(4,trace,"clawirq");
702 /* Bypass all 'unsolicited interrupts' */ 702 /* Bypass all 'unsolicited interrupts' */
@@ -706,7 +706,7 @@ claw_irq_handler(struct ccw_device *cdev,
706 cdev->dev.bus_id,irb->scsw.cstat, irb->scsw.dstat); 706 cdev->dev.bus_id,irb->scsw.cstat, irb->scsw.dstat);
707#ifdef FUNCTRACE 707#ifdef FUNCTRACE
708 printk(KERN_INFO "claw: %s() " 708 printk(KERN_INFO "claw: %s() "
709 "exit on line %d\n",__FUNCTION__,__LINE__); 709 "exit on line %d\n",__func__,__LINE__);
710#endif 710#endif
711 CLAW_DBF_TEXT(2,trace,"badirq"); 711 CLAW_DBF_TEXT(2,trace,"badirq");
712 return; 712 return;
@@ -752,7 +752,7 @@ claw_irq_handler(struct ccw_device *cdev,
752#endif 752#endif
753#ifdef FUNCTRACE 753#ifdef FUNCTRACE
754 printk(KERN_INFO "%s:%s Exit on line %d\n", 754 printk(KERN_INFO "%s:%s Exit on line %d\n",
755 dev->name,__FUNCTION__,__LINE__); 755 dev->name,__func__,__LINE__);
756#endif 756#endif
757 CLAW_DBF_TEXT(2,trace,"chanchk"); 757 CLAW_DBF_TEXT(2,trace,"chanchk");
758 /* return; */ 758 /* return; */
@@ -777,7 +777,7 @@ claw_irq_handler(struct ccw_device *cdev,
777 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 777 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
778#ifdef FUNCTRACE 778#ifdef FUNCTRACE
779 printk(KERN_INFO "%s:%s Exit on line %d\n", 779 printk(KERN_INFO "%s:%s Exit on line %d\n",
780 dev->name,__FUNCTION__,__LINE__); 780 dev->name,__func__,__LINE__);
781#endif 781#endif
782 return; 782 return;
783 } 783 }
@@ -788,7 +788,7 @@ claw_irq_handler(struct ccw_device *cdev,
788#endif 788#endif
789#ifdef FUNCTRACE 789#ifdef FUNCTRACE
790 printk(KERN_INFO "%s:%s Exit on line %d\n", 790 printk(KERN_INFO "%s:%s Exit on line %d\n",
791 dev->name,__FUNCTION__,__LINE__); 791 dev->name,__func__,__LINE__);
792#endif 792#endif
793 CLAW_DBF_TEXT(4,trace,"stop"); 793 CLAW_DBF_TEXT(4,trace,"stop");
794 return; 794 return;
@@ -804,7 +804,7 @@ claw_irq_handler(struct ccw_device *cdev,
804 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 804 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
805#ifdef FUNCTRACE 805#ifdef FUNCTRACE
806 printk(KERN_INFO "%s:%s Exit on line %d\n", 806 printk(KERN_INFO "%s:%s Exit on line %d\n",
807 dev->name,__FUNCTION__,__LINE__); 807 dev->name,__func__,__LINE__);
808#endif 808#endif
809 CLAW_DBF_TEXT(4,trace,"haltio"); 809 CLAW_DBF_TEXT(4,trace,"haltio");
810 return; 810 return;
@@ -838,7 +838,7 @@ claw_irq_handler(struct ccw_device *cdev,
838#endif 838#endif
839#ifdef FUNCTRACE 839#ifdef FUNCTRACE
840 printk(KERN_INFO "%s:%s Exit on line %d\n", 840 printk(KERN_INFO "%s:%s Exit on line %d\n",
841 dev->name,__FUNCTION__,__LINE__); 841 dev->name,__func__,__LINE__);
842#endif 842#endif
843 CLAW_DBF_TEXT(4,trace,"haltio"); 843 CLAW_DBF_TEXT(4,trace,"haltio");
844 return; 844 return;
@@ -858,7 +858,7 @@ claw_irq_handler(struct ccw_device *cdev,
858 } 858 }
859#ifdef FUNCTRACE 859#ifdef FUNCTRACE
860 printk(KERN_INFO "%s:%s Exit on line %d\n", 860 printk(KERN_INFO "%s:%s Exit on line %d\n",
861 dev->name,__FUNCTION__,__LINE__); 861 dev->name,__func__,__LINE__);
862#endif 862#endif
863 CLAW_DBF_TEXT(4,trace,"notrdy"); 863 CLAW_DBF_TEXT(4,trace,"notrdy");
864 return; 864 return;
@@ -874,7 +874,7 @@ claw_irq_handler(struct ccw_device *cdev,
874 } 874 }
875#ifdef FUNCTRACE 875#ifdef FUNCTRACE
876 printk(KERN_INFO "%s:%s Exit on line %d\n", 876 printk(KERN_INFO "%s:%s Exit on line %d\n",
877 dev->name,__FUNCTION__,__LINE__); 877 dev->name,__func__,__LINE__);
878#endif 878#endif
879 CLAW_DBF_TEXT(4,trace,"PCI_read"); 879 CLAW_DBF_TEXT(4,trace,"PCI_read");
880 return; 880 return;
@@ -885,7 +885,7 @@ claw_irq_handler(struct ccw_device *cdev,
885 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 885 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
886#ifdef FUNCTRACE 886#ifdef FUNCTRACE
887 printk(KERN_INFO "%s:%s Exit on line %d\n", 887 printk(KERN_INFO "%s:%s Exit on line %d\n",
888 dev->name,__FUNCTION__,__LINE__); 888 dev->name,__func__,__LINE__);
889#endif 889#endif
890 CLAW_DBF_TEXT(4,trace,"SPend_rd"); 890 CLAW_DBF_TEXT(4,trace,"SPend_rd");
891 return; 891 return;
@@ -906,7 +906,7 @@ claw_irq_handler(struct ccw_device *cdev,
906#endif 906#endif
907#ifdef FUNCTRACE 907#ifdef FUNCTRACE
908 printk(KERN_INFO "%s:%s Exit on line %d\n", 908 printk(KERN_INFO "%s:%s Exit on line %d\n",
909 dev->name,__FUNCTION__,__LINE__); 909 dev->name,__func__,__LINE__);
910#endif 910#endif
911 CLAW_DBF_TEXT(4,trace,"RdIRQXit"); 911 CLAW_DBF_TEXT(4,trace,"RdIRQXit");
912 return; 912 return;
@@ -929,7 +929,7 @@ claw_irq_handler(struct ccw_device *cdev,
929 } 929 }
930#ifdef FUNCTRACE 930#ifdef FUNCTRACE
931 printk(KERN_INFO "%s:%s Exit on line %d\n", 931 printk(KERN_INFO "%s:%s Exit on line %d\n",
932 dev->name,__FUNCTION__,__LINE__); 932 dev->name,__func__,__LINE__);
933#endif 933#endif
934 CLAW_DBF_TEXT(4,trace,"rstrtwrt"); 934 CLAW_DBF_TEXT(4,trace,"rstrtwrt");
935 return; 935 return;
@@ -946,7 +946,7 @@ claw_irq_handler(struct ccw_device *cdev,
946 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 946 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
947#ifdef FUNCTRACE 947#ifdef FUNCTRACE
948 printk(KERN_INFO "%s:%s Exit on line %d\n", 948 printk(KERN_INFO "%s:%s Exit on line %d\n",
949 dev->name,__FUNCTION__,__LINE__); 949 dev->name,__func__,__LINE__);
950#endif 950#endif
951 CLAW_DBF_TEXT(4,trace,"writeUE"); 951 CLAW_DBF_TEXT(4,trace,"writeUE");
952 return; 952 return;
@@ -969,7 +969,7 @@ claw_irq_handler(struct ccw_device *cdev,
969#endif 969#endif
970#ifdef FUNCTRACE 970#ifdef FUNCTRACE
971 printk(KERN_INFO "%s:%s Exit on line %d\n", 971 printk(KERN_INFO "%s:%s Exit on line %d\n",
972 dev->name,__FUNCTION__,__LINE__); 972 dev->name,__func__,__LINE__);
973#endif 973#endif
974 CLAW_DBF_TEXT(4,trace,"StWtExit"); 974 CLAW_DBF_TEXT(4,trace,"StWtExit");
975 return; 975 return;
@@ -978,7 +978,7 @@ claw_irq_handler(struct ccw_device *cdev,
978 "state=%d\n",dev->name,p_ch->claw_state); 978 "state=%d\n",dev->name,p_ch->claw_state);
979#ifdef FUNCTRACE 979#ifdef FUNCTRACE
980 printk(KERN_INFO "%s:%s Exit on line %d\n", 980 printk(KERN_INFO "%s:%s Exit on line %d\n",
981 dev->name,__FUNCTION__,__LINE__); 981 dev->name,__func__,__LINE__);
982#endif 982#endif
983 CLAW_DBF_TEXT(2,trace,"badIRQ"); 983 CLAW_DBF_TEXT(2,trace,"badIRQ");
984 return; 984 return;
@@ -1001,7 +1001,7 @@ claw_irq_tasklet ( unsigned long data )
1001 p_ch = (struct chbk *) data; 1001 p_ch = (struct chbk *) data;
1002 dev = (struct net_device *)p_ch->ndev; 1002 dev = (struct net_device *)p_ch->ndev;
1003#ifdef FUNCTRACE 1003#ifdef FUNCTRACE
1004 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); 1004 printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__);
1005#endif 1005#endif
1006#ifdef DEBUGMSG 1006#ifdef DEBUGMSG
1007 printk(KERN_INFO "%s: variable p_ch =\n",dev->name); 1007 printk(KERN_INFO "%s: variable p_ch =\n",dev->name);
@@ -1021,7 +1021,7 @@ claw_irq_tasklet ( unsigned long data )
1021 CLAW_DBF_TEXT(4,trace,"TskletXt"); 1021 CLAW_DBF_TEXT(4,trace,"TskletXt");
1022#ifdef FUNCTRACE 1022#ifdef FUNCTRACE
1023 printk(KERN_INFO "%s:%s Exit on line %d\n", 1023 printk(KERN_INFO "%s:%s Exit on line %d\n",
1024 dev->name,__FUNCTION__,__LINE__); 1024 dev->name,__func__,__LINE__);
1025#endif 1025#endif
1026 return; 1026 return;
1027} /* end of claw_irq_bh */ 1027} /* end of claw_irq_bh */
@@ -1048,7 +1048,7 @@ claw_release(struct net_device *dev)
1048 if (!privptr) 1048 if (!privptr)
1049 return 0; 1049 return 0;
1050#ifdef FUNCTRACE 1050#ifdef FUNCTRACE
1051 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); 1051 printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__);
1052#endif 1052#endif
1053 CLAW_DBF_TEXT(4,trace,"release"); 1053 CLAW_DBF_TEXT(4,trace,"release");
1054#ifdef DEBUGMSG 1054#ifdef DEBUGMSG
@@ -1090,7 +1090,7 @@ claw_release(struct net_device *dev)
1090 if(privptr->buffs_alloc != 1) { 1090 if(privptr->buffs_alloc != 1) {
1091#ifdef FUNCTRACE 1091#ifdef FUNCTRACE
1092 printk(KERN_INFO "%s:%s Exit on line %d\n", 1092 printk(KERN_INFO "%s:%s Exit on line %d\n",
1093 dev->name,__FUNCTION__,__LINE__); 1093 dev->name,__func__,__LINE__);
1094#endif 1094#endif
1095 CLAW_DBF_TEXT(4,trace,"none2fre"); 1095 CLAW_DBF_TEXT(4,trace,"none2fre");
1096 return 0; 1096 return 0;
@@ -1171,7 +1171,7 @@ claw_release(struct net_device *dev)
1171 } 1171 }
1172#ifdef FUNCTRACE 1172#ifdef FUNCTRACE
1173 printk(KERN_INFO "%s:%s Exit on line %d\n", 1173 printk(KERN_INFO "%s:%s Exit on line %d\n",
1174 dev->name,__FUNCTION__,__LINE__); 1174 dev->name,__func__,__LINE__);
1175#endif 1175#endif
1176 CLAW_DBF_TEXT(4,trace,"rlsexit"); 1176 CLAW_DBF_TEXT(4,trace,"rlsexit");
1177 return 0; 1177 return 0;
@@ -1192,7 +1192,7 @@ claw_write_retry ( struct chbk *p_ch )
1192 1192
1193 1193
1194#ifdef FUNCTRACE 1194#ifdef FUNCTRACE
1195 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__); 1195 printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
1196 printk(KERN_INFO "claw: variable p_ch =\n"); 1196 printk(KERN_INFO "claw: variable p_ch =\n");
1197 dumpit((char *) p_ch, sizeof(struct chbk)); 1197 dumpit((char *) p_ch, sizeof(struct chbk));
1198#endif 1198#endif
@@ -1200,20 +1200,20 @@ claw_write_retry ( struct chbk *p_ch )
1200 if (p_ch->claw_state == CLAW_STOP) { 1200 if (p_ch->claw_state == CLAW_STOP) {
1201#ifdef FUNCTRACE 1201#ifdef FUNCTRACE
1202 printk(KERN_INFO "%s:%s Exit on line %d\n", 1202 printk(KERN_INFO "%s:%s Exit on line %d\n",
1203 dev->name,__FUNCTION__,__LINE__); 1203 dev->name,__func__,__LINE__);
1204#endif 1204#endif
1205 return; 1205 return;
1206 } 1206 }
1207#ifdef DEBUGMSG 1207#ifdef DEBUGMSG
1208 printk( KERN_INFO "%s:%s state-%02x\n" , 1208 printk( KERN_INFO "%s:%s state-%02x\n" ,
1209 dev->name, 1209 dev->name,
1210 __FUNCTION__, 1210 __func__,
1211 p_ch->claw_state); 1211 p_ch->claw_state);
1212#endif 1212#endif
1213 claw_strt_out_IO( dev ); 1213 claw_strt_out_IO( dev );
1214#ifdef FUNCTRACE 1214#ifdef FUNCTRACE
1215 printk(KERN_INFO "%s:%s Exit on line %d\n", 1215 printk(KERN_INFO "%s:%s Exit on line %d\n",
1216 dev->name,__FUNCTION__,__LINE__); 1216 dev->name,__func__,__LINE__);
1217#endif 1217#endif
1218 CLAW_DBF_TEXT(4,trace,"rtry_xit"); 1218 CLAW_DBF_TEXT(4,trace,"rtry_xit");
1219 return; 1219 return;
@@ -1235,7 +1235,7 @@ claw_write_next ( struct chbk * p_ch )
1235 int rc; 1235 int rc;
1236 1236
1237#ifdef FUNCTRACE 1237#ifdef FUNCTRACE
1238 printk(KERN_INFO "%s:%s Enter \n",p_ch->ndev->name,__FUNCTION__); 1238 printk(KERN_INFO "%s:%s Enter \n",p_ch->ndev->name,__func__);
1239 printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name); 1239 printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name);
1240 dumpit((char *) p_ch, sizeof(struct chbk)); 1240 dumpit((char *) p_ch, sizeof(struct chbk));
1241#endif 1241#endif
@@ -1262,7 +1262,7 @@ claw_write_next ( struct chbk * p_ch )
1262 1262
1263#ifdef FUNCTRACE 1263#ifdef FUNCTRACE
1264 printk(KERN_INFO "%s:%s Exit on line %d\n", 1264 printk(KERN_INFO "%s:%s Exit on line %d\n",
1265 dev->name,__FUNCTION__,__LINE__); 1265 dev->name,__func__,__LINE__);
1266#endif 1266#endif
1267 return; 1267 return;
1268} /* end of claw_write_next */ 1268} /* end of claw_write_next */
@@ -1276,7 +1276,7 @@ static void
1276claw_timer ( struct chbk * p_ch ) 1276claw_timer ( struct chbk * p_ch )
1277{ 1277{
1278#ifdef FUNCTRACE 1278#ifdef FUNCTRACE
1279 printk(KERN_INFO "%s:%s Entry\n",p_ch->ndev->name,__FUNCTION__); 1279 printk(KERN_INFO "%s:%s Entry\n",p_ch->ndev->name,__func__);
1280 printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name); 1280 printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name);
1281 dumpit((char *) p_ch, sizeof(struct chbk)); 1281 dumpit((char *) p_ch, sizeof(struct chbk));
1282#endif 1282#endif
@@ -1285,7 +1285,7 @@ claw_timer ( struct chbk * p_ch )
1285 wake_up(&p_ch->wait); 1285 wake_up(&p_ch->wait);
1286#ifdef FUNCTRACE 1286#ifdef FUNCTRACE
1287 printk(KERN_INFO "%s:%s Exit on line %d\n", 1287 printk(KERN_INFO "%s:%s Exit on line %d\n",
1288 p_ch->ndev->name,__FUNCTION__,__LINE__); 1288 p_ch->ndev->name,__func__,__LINE__);
1289#endif 1289#endif
1290 return; 1290 return;
1291} /* end of claw_timer */ 1291} /* end of claw_timer */
@@ -1312,7 +1312,7 @@ pages_to_order_of_mag(int num_of_pages)
1312 int order_of_mag=1; /* assume 2 pages */ 1312 int order_of_mag=1; /* assume 2 pages */
1313 int nump=2; 1313 int nump=2;
1314#ifdef FUNCTRACE 1314#ifdef FUNCTRACE
1315 printk(KERN_INFO "%s Enter pages = %d \n",__FUNCTION__,num_of_pages); 1315 printk(KERN_INFO "%s Enter pages = %d \n",__func__,num_of_pages);
1316#endif 1316#endif
1317 CLAW_DBF_TEXT_(5,trace,"pages%d",num_of_pages); 1317 CLAW_DBF_TEXT_(5,trace,"pages%d",num_of_pages);
1318 if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */ 1318 if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */
@@ -1327,7 +1327,7 @@ pages_to_order_of_mag(int num_of_pages)
1327 if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */ 1327 if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */
1328#ifdef FUNCTRACE 1328#ifdef FUNCTRACE
1329 printk(KERN_INFO "%s Exit on line %d, order = %d\n", 1329 printk(KERN_INFO "%s Exit on line %d, order = %d\n",
1330 __FUNCTION__,__LINE__, order_of_mag); 1330 __func__,__LINE__, order_of_mag);
1331#endif 1331#endif
1332 CLAW_DBF_TEXT_(5,trace,"mag%d",order_of_mag); 1332 CLAW_DBF_TEXT_(5,trace,"mag%d",order_of_mag);
1333 return order_of_mag; 1333 return order_of_mag;
@@ -1349,7 +1349,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1349 struct ccwbk* p_buf; 1349 struct ccwbk* p_buf;
1350#endif 1350#endif
1351#ifdef FUNCTRACE 1351#ifdef FUNCTRACE
1352 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); 1352 printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__);
1353#endif 1353#endif
1354#ifdef DEBUGMSG 1354#ifdef DEBUGMSG
1355 printk(KERN_INFO "dev\n"); 1355 printk(KERN_INFO "dev\n");
@@ -1369,7 +1369,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1369 if ( p_first==NULL) { 1369 if ( p_first==NULL) {
1370#ifdef FUNCTRACE 1370#ifdef FUNCTRACE
1371 printk(KERN_INFO "%s:%s Exit on line %d\n", 1371 printk(KERN_INFO "%s:%s Exit on line %d\n",
1372 dev->name,__FUNCTION__,__LINE__); 1372 dev->name,__func__,__LINE__);
1373#endif 1373#endif
1374 CLAW_DBF_TEXT(4,trace,"addexit"); 1374 CLAW_DBF_TEXT(4,trace,"addexit");
1375 return 0; 1375 return 0;
@@ -1400,9 +1400,9 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1400 if ( privptr-> p_read_active_first ==NULL ) { 1400 if ( privptr-> p_read_active_first ==NULL ) {
1401#ifdef DEBUGMSG 1401#ifdef DEBUGMSG
1402 printk(KERN_INFO "%s:%s p_read_active_first == NULL \n", 1402 printk(KERN_INFO "%s:%s p_read_active_first == NULL \n",
1403 dev->name,__FUNCTION__); 1403 dev->name,__func__);
1404 printk(KERN_INFO "%s:%s Read active first/last changed \n", 1404 printk(KERN_INFO "%s:%s Read active first/last changed \n",
1405 dev->name,__FUNCTION__); 1405 dev->name,__func__);
1406#endif 1406#endif
1407 privptr-> p_read_active_first= p_first; /* set new first */ 1407 privptr-> p_read_active_first= p_first; /* set new first */
1408 privptr-> p_read_active_last = p_last; /* set new last */ 1408 privptr-> p_read_active_last = p_last; /* set new last */
@@ -1411,7 +1411,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1411 1411
1412#ifdef DEBUGMSG 1412#ifdef DEBUGMSG
1413 printk(KERN_INFO "%s:%s Read in progress \n", 1413 printk(KERN_INFO "%s:%s Read in progress \n",
1414 dev->name,__FUNCTION__); 1414 dev->name,__func__);
1415#endif 1415#endif
1416 /* set up TIC ccw */ 1416 /* set up TIC ccw */
1417 temp_ccw.cda= (__u32)__pa(&p_first->read); 1417 temp_ccw.cda= (__u32)__pa(&p_first->read);
@@ -1450,15 +1450,15 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1450 privptr->p_read_active_last=p_last; 1450 privptr->p_read_active_last=p_last;
1451 } /* end of if ( privptr-> p_read_active_first ==NULL) */ 1451 } /* end of if ( privptr-> p_read_active_first ==NULL) */
1452#ifdef IOTRACE 1452#ifdef IOTRACE
1453 printk(KERN_INFO "%s:%s dump p_last CCW BK \n",dev->name,__FUNCTION__); 1453 printk(KERN_INFO "%s:%s dump p_last CCW BK \n",dev->name,__func__);
1454 dumpit((char *)p_last, sizeof(struct ccwbk)); 1454 dumpit((char *)p_last, sizeof(struct ccwbk));
1455 printk(KERN_INFO "%s:%s dump p_end CCW BK \n",dev->name,__FUNCTION__); 1455 printk(KERN_INFO "%s:%s dump p_end CCW BK \n",dev->name,__func__);
1456 dumpit((char *)p_end, sizeof(struct endccw)); 1456 dumpit((char *)p_end, sizeof(struct endccw));
1457 1457
1458 printk(KERN_INFO "%s:%s dump p_first CCW BK \n",dev->name,__FUNCTION__); 1458 printk(KERN_INFO "%s:%s dump p_first CCW BK \n",dev->name,__func__);
1459 dumpit((char *)p_first, sizeof(struct ccwbk)); 1459 dumpit((char *)p_first, sizeof(struct ccwbk));
1460 printk(KERN_INFO "%s:%s Dump Active CCW chain \n", 1460 printk(KERN_INFO "%s:%s Dump Active CCW chain \n",
1461 dev->name,__FUNCTION__); 1461 dev->name,__func__);
1462 p_buf=privptr->p_read_active_first; 1462 p_buf=privptr->p_read_active_first;
1463 while (p_buf!=NULL) { 1463 while (p_buf!=NULL) {
1464 dumpit((char *)p_buf, sizeof(struct ccwbk)); 1464 dumpit((char *)p_buf, sizeof(struct ccwbk));
@@ -1467,7 +1467,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1467#endif 1467#endif
1468#ifdef FUNCTRACE 1468#ifdef FUNCTRACE
1469 printk(KERN_INFO "%s:%s Exit on line %d\n", 1469 printk(KERN_INFO "%s:%s Exit on line %d\n",
1470 dev->name,__FUNCTION__,__LINE__); 1470 dev->name,__func__,__LINE__);
1471#endif 1471#endif
1472 CLAW_DBF_TEXT(4,trace,"addexit"); 1472 CLAW_DBF_TEXT(4,trace,"addexit");
1473 return 0; 1473 return 0;
@@ -1483,7 +1483,7 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code)
1483{ 1483{
1484#ifdef FUNCTRACE 1484#ifdef FUNCTRACE
1485 printk(KERN_INFO "%s: %s() > enter \n", 1485 printk(KERN_INFO "%s: %s() > enter \n",
1486 cdev->dev.bus_id,__FUNCTION__); 1486 cdev->dev.bus_id,__func__);
1487#endif 1487#endif
1488 CLAW_DBF_TEXT(4,trace,"ccwret"); 1488 CLAW_DBF_TEXT(4,trace,"ccwret");
1489#ifdef DEBUGMSG 1489#ifdef DEBUGMSG
@@ -1516,7 +1516,7 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code)
1516 } 1516 }
1517#ifdef FUNCTRACE 1517#ifdef FUNCTRACE
1518 printk(KERN_INFO "%s: %s() > exit on line %d\n", 1518 printk(KERN_INFO "%s: %s() > exit on line %d\n",
1519 cdev->dev.bus_id,__FUNCTION__,__LINE__); 1519 cdev->dev.bus_id,__func__,__LINE__);
1520#endif 1520#endif
1521 CLAW_DBF_TEXT(4,trace,"ccwret"); 1521 CLAW_DBF_TEXT(4,trace,"ccwret");
1522} /* end of ccw_check_return_code */ 1522} /* end of ccw_check_return_code */
@@ -1531,7 +1531,7 @@ ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1531 struct net_device *dev = p_ch->ndev; 1531 struct net_device *dev = p_ch->ndev;
1532 1532
1533#ifdef FUNCTRACE 1533#ifdef FUNCTRACE
1534 printk(KERN_INFO "%s: %s() > enter\n",dev->name,__FUNCTION__); 1534 printk(KERN_INFO "%s: %s() > enter\n",dev->name,__func__);
1535#endif 1535#endif
1536#ifdef DEBUGMSG 1536#ifdef DEBUGMSG
1537 printk(KERN_INFO "%s: variable dev =\n",dev->name); 1537 printk(KERN_INFO "%s: variable dev =\n",dev->name);
@@ -1578,7 +1578,7 @@ ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1578 1578
1579#ifdef FUNCTRACE 1579#ifdef FUNCTRACE
1580 printk(KERN_INFO "%s: %s() exit on line %d\n", 1580 printk(KERN_INFO "%s: %s() exit on line %d\n",
1581 dev->name,__FUNCTION__,__LINE__); 1581 dev->name,__func__,__LINE__);
1582#endif 1582#endif
1583} /* end of ccw_check_unit_check */ 1583} /* end of ccw_check_unit_check */
1584 1584
@@ -1706,7 +1706,7 @@ find_link(struct net_device *dev, char *host_name, char *ws_name )
1706 int rc=0; 1706 int rc=0;
1707 1707
1708#ifdef FUNCTRACE 1708#ifdef FUNCTRACE
1709 printk(KERN_INFO "%s:%s > enter \n",dev->name,__FUNCTION__); 1709 printk(KERN_INFO "%s:%s > enter \n",dev->name,__func__);
1710#endif 1710#endif
1711 CLAW_DBF_TEXT(2,setup,"findlink"); 1711 CLAW_DBF_TEXT(2,setup,"findlink");
1712#ifdef DEBUGMSG 1712#ifdef DEBUGMSG
@@ -1739,7 +1739,7 @@ find_link(struct net_device *dev, char *host_name, char *ws_name )
1739 1739
1740#ifdef FUNCTRACE 1740#ifdef FUNCTRACE
1741 printk(KERN_INFO "%s:%s Exit on line %d\n", 1741 printk(KERN_INFO "%s:%s Exit on line %d\n",
1742 dev->name,__FUNCTION__,__LINE__); 1742 dev->name,__func__,__LINE__);
1743#endif 1743#endif
1744 return 0; 1744 return 0;
1745} /* end of find_link */ 1745} /* end of find_link */
@@ -1773,7 +1773,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1773 struct ccwbk *p_buf; 1773 struct ccwbk *p_buf;
1774#endif 1774#endif
1775#ifdef FUNCTRACE 1775#ifdef FUNCTRACE
1776 printk(KERN_INFO "%s: %s() > enter\n",dev->name,__FUNCTION__); 1776 printk(KERN_INFO "%s: %s() > enter\n",dev->name,__func__);
1777#endif 1777#endif
1778 CLAW_DBF_TEXT(4,trace,"hw_tx"); 1778 CLAW_DBF_TEXT(4,trace,"hw_tx");
1779#ifdef DEBUGMSG 1779#ifdef DEBUGMSG
@@ -1787,7 +1787,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1787 p_ch=(struct chbk *)&privptr->channel[WRITE]; 1787 p_ch=(struct chbk *)&privptr->channel[WRITE];
1788 p_env =privptr->p_env; 1788 p_env =privptr->p_env;
1789#ifdef IOTRACE 1789#ifdef IOTRACE
1790 printk(KERN_INFO "%s: %s() dump sk_buff \n",dev->name,__FUNCTION__); 1790 printk(KERN_INFO "%s: %s() dump sk_buff \n",dev->name,__func__);
1791 dumpit((char *)skb ,sizeof(struct sk_buff)); 1791 dumpit((char *)skb ,sizeof(struct sk_buff));
1792#endif 1792#endif
1793 claw_free_wrt_buf(dev); /* Clean up free chain if possible */ 1793 claw_free_wrt_buf(dev); /* Clean up free chain if possible */
@@ -1877,7 +1877,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1877 while (len_of_data > 0) { 1877 while (len_of_data > 0) {
1878#ifdef DEBUGMSG 1878#ifdef DEBUGMSG
1879 printk(KERN_INFO "%s: %s() length-of-data is %ld \n", 1879 printk(KERN_INFO "%s: %s() length-of-data is %ld \n",
1880 dev->name ,__FUNCTION__,len_of_data); 1880 dev->name ,__func__,len_of_data);
1881 dumpit((char *)pDataAddress ,64); 1881 dumpit((char *)pDataAddress ,64);
1882#endif 1882#endif
1883 p_this_ccw=privptr->p_write_free_chain; /* get a block */ 1883 p_this_ccw=privptr->p_write_free_chain; /* get a block */
@@ -1913,7 +1913,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1913 p_last_ccw=p_this_ccw; /* save new last block */ 1913 p_last_ccw=p_this_ccw; /* save new last block */
1914#ifdef IOTRACE 1914#ifdef IOTRACE
1915 printk(KERN_INFO "%s: %s() > CCW and Buffer %ld bytes long \n", 1915 printk(KERN_INFO "%s: %s() > CCW and Buffer %ld bytes long \n",
1916 dev->name,__FUNCTION__,bytesInThisBuffer); 1916 dev->name,__func__,bytesInThisBuffer);
1917 dumpit((char *)p_this_ccw, sizeof(struct ccwbk)); 1917 dumpit((char *)p_this_ccw, sizeof(struct ccwbk));
1918 dumpit((char *)p_this_ccw->p_buffer, 64); 1918 dumpit((char *)p_this_ccw->p_buffer, 64);
1919#endif 1919#endif
@@ -1998,7 +1998,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1998 1998
1999#ifdef IOTRACE 1999#ifdef IOTRACE
2000 printk(KERN_INFO "%s: %s() > Dump Active CCW chain \n", 2000 printk(KERN_INFO "%s: %s() > Dump Active CCW chain \n",
2001 dev->name,__FUNCTION__); 2001 dev->name,__func__);
2002 p_buf=privptr->p_write_active_first; 2002 p_buf=privptr->p_write_active_first;
2003 while (p_buf!=NULL) { 2003 while (p_buf!=NULL) {
2004 dumpit((char *)p_buf, sizeof(struct ccwbk)); 2004 dumpit((char *)p_buf, sizeof(struct ccwbk));
@@ -2018,7 +2018,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
2018 /* if write free count is zero , set NOBUFFER */ 2018 /* if write free count is zero , set NOBUFFER */
2019#ifdef DEBUGMSG 2019#ifdef DEBUGMSG
2020 printk(KERN_INFO "%s: %s() > free_count is %d\n", 2020 printk(KERN_INFO "%s: %s() > free_count is %d\n",
2021 dev->name,__FUNCTION__, 2021 dev->name,__func__,
2022 (int) privptr->write_free_count ); 2022 (int) privptr->write_free_count );
2023#endif 2023#endif
2024 if (privptr->write_free_count==0) { 2024 if (privptr->write_free_count==0) {
@@ -2029,7 +2029,7 @@ Done2:
2029Done: 2029Done:
2030#ifdef FUNCTRACE 2030#ifdef FUNCTRACE
2031 printk(KERN_INFO "%s: %s() > exit on line %d, rc = %d \n", 2031 printk(KERN_INFO "%s: %s() > exit on line %d, rc = %d \n",
2032 dev->name,__FUNCTION__,__LINE__, rc); 2032 dev->name,__func__,__LINE__, rc);
2033#endif 2033#endif
2034 return(rc); 2034 return(rc);
2035} /* end of claw_hw_tx */ 2035} /* end of claw_hw_tx */
@@ -2063,7 +2063,7 @@ init_ccw_bk(struct net_device *dev)
2063 addr_t real_TIC_address; 2063 addr_t real_TIC_address;
2064 int i,j; 2064 int i,j;
2065#ifdef FUNCTRACE 2065#ifdef FUNCTRACE
2066 printk(KERN_INFO "%s: %s() enter \n",dev->name,__FUNCTION__); 2066 printk(KERN_INFO "%s: %s() enter \n",dev->name,__func__);
2067#endif 2067#endif
2068 CLAW_DBF_TEXT(4,trace,"init_ccw"); 2068 CLAW_DBF_TEXT(4,trace,"init_ccw");
2069#ifdef DEBUGMSG 2069#ifdef DEBUGMSG
@@ -2097,15 +2097,15 @@ init_ccw_bk(struct net_device *dev)
2097#ifdef DEBUGMSG 2097#ifdef DEBUGMSG
2098 printk(KERN_INFO "%s: %s() " 2098 printk(KERN_INFO "%s: %s() "
2099 "ccw_blocks_required=%d\n", 2099 "ccw_blocks_required=%d\n",
2100 dev->name,__FUNCTION__, 2100 dev->name,__func__,
2101 ccw_blocks_required); 2101 ccw_blocks_required);
2102 printk(KERN_INFO "%s: %s() " 2102 printk(KERN_INFO "%s: %s() "
2103 "PAGE_SIZE=0x%x\n", 2103 "PAGE_SIZE=0x%x\n",
2104 dev->name,__FUNCTION__, 2104 dev->name,__func__,
2105 (unsigned int)PAGE_SIZE); 2105 (unsigned int)PAGE_SIZE);
2106 printk(KERN_INFO "%s: %s() > " 2106 printk(KERN_INFO "%s: %s() > "
2107 "PAGE_MASK=0x%x\n", 2107 "PAGE_MASK=0x%x\n",
2108 dev->name,__FUNCTION__, 2108 dev->name,__func__,
2109 (unsigned int)PAGE_MASK); 2109 (unsigned int)PAGE_MASK);
2110#endif 2110#endif
2111 /* 2111 /*
@@ -2117,10 +2117,10 @@ init_ccw_bk(struct net_device *dev)
2117 2117
2118#ifdef DEBUGMSG 2118#ifdef DEBUGMSG
2119 printk(KERN_INFO "%s: %s() > ccw_blocks_perpage=%d\n", 2119 printk(KERN_INFO "%s: %s() > ccw_blocks_perpage=%d\n",
2120 dev->name,__FUNCTION__, 2120 dev->name,__func__,
2121 ccw_blocks_perpage); 2121 ccw_blocks_perpage);
2122 printk(KERN_INFO "%s: %s() > ccw_pages_required=%d\n", 2122 printk(KERN_INFO "%s: %s() > ccw_pages_required=%d\n",
2123 dev->name,__FUNCTION__, 2123 dev->name,__func__,
2124 ccw_pages_required); 2124 ccw_pages_required);
2125#endif 2125#endif
2126 /* 2126 /*
@@ -2156,29 +2156,29 @@ init_ccw_bk(struct net_device *dev)
2156#ifdef DEBUGMSG 2156#ifdef DEBUGMSG
2157 if (privptr->p_env->read_size < PAGE_SIZE) { 2157 if (privptr->p_env->read_size < PAGE_SIZE) {
2158 printk(KERN_INFO "%s: %s() reads_perpage=%d\n", 2158 printk(KERN_INFO "%s: %s() reads_perpage=%d\n",
2159 dev->name,__FUNCTION__, 2159 dev->name,__func__,
2160 claw_reads_perpage); 2160 claw_reads_perpage);
2161 } 2161 }
2162 else { 2162 else {
2163 printk(KERN_INFO "%s: %s() pages_perread=%d\n", 2163 printk(KERN_INFO "%s: %s() pages_perread=%d\n",
2164 dev->name,__FUNCTION__, 2164 dev->name,__func__,
2165 privptr->p_buff_pages_perread); 2165 privptr->p_buff_pages_perread);
2166 } 2166 }
2167 printk(KERN_INFO "%s: %s() read_pages=%d\n", 2167 printk(KERN_INFO "%s: %s() read_pages=%d\n",
2168 dev->name,__FUNCTION__, 2168 dev->name,__func__,
2169 claw_read_pages); 2169 claw_read_pages);
2170 if (privptr->p_env->write_size < PAGE_SIZE) { 2170 if (privptr->p_env->write_size < PAGE_SIZE) {
2171 printk(KERN_INFO "%s: %s() writes_perpage=%d\n", 2171 printk(KERN_INFO "%s: %s() writes_perpage=%d\n",
2172 dev->name,__FUNCTION__, 2172 dev->name,__func__,
2173 claw_writes_perpage); 2173 claw_writes_perpage);
2174 } 2174 }
2175 else { 2175 else {
2176 printk(KERN_INFO "%s: %s() pages_perwrite=%d\n", 2176 printk(KERN_INFO "%s: %s() pages_perwrite=%d\n",
2177 dev->name,__FUNCTION__, 2177 dev->name,__func__,
2178 privptr->p_buff_pages_perwrite); 2178 privptr->p_buff_pages_perwrite);
2179 } 2179 }
2180 printk(KERN_INFO "%s: %s() write_pages=%d\n", 2180 printk(KERN_INFO "%s: %s() write_pages=%d\n",
2181 dev->name,__FUNCTION__, 2181 dev->name,__func__,
2182 claw_write_pages); 2182 claw_write_pages);
2183#endif 2183#endif
2184 2184
@@ -2194,12 +2194,12 @@ init_ccw_bk(struct net_device *dev)
2194 printk(KERN_INFO "%s: %s() " 2194 printk(KERN_INFO "%s: %s() "
2195 "__get_free_pages for CCWs failed : " 2195 "__get_free_pages for CCWs failed : "
2196 "pages is %d\n", 2196 "pages is %d\n",
2197 dev->name,__FUNCTION__, 2197 dev->name,__func__,
2198 ccw_pages_required ); 2198 ccw_pages_required );
2199#ifdef FUNCTRACE 2199#ifdef FUNCTRACE
2200 printk(KERN_INFO "%s: %s() > " 2200 printk(KERN_INFO "%s: %s() > "
2201 "exit on line %d, rc = ENOMEM\n", 2201 "exit on line %d, rc = ENOMEM\n",
2202 dev->name,__FUNCTION__, 2202 dev->name,__func__,
2203 __LINE__); 2203 __LINE__);
2204#endif 2204#endif
2205 return -ENOMEM; 2205 return -ENOMEM;
@@ -2218,7 +2218,7 @@ init_ccw_bk(struct net_device *dev)
2218 /* Initialize ending CCW block */ 2218 /* Initialize ending CCW block */
2219#ifdef DEBUGMSG 2219#ifdef DEBUGMSG
2220 printk(KERN_INFO "%s: %s() begin initialize ending CCW blocks\n", 2220 printk(KERN_INFO "%s: %s() begin initialize ending CCW blocks\n",
2221 dev->name,__FUNCTION__); 2221 dev->name,__func__);
2222#endif 2222#endif
2223 2223
2224 p_endccw=privptr->p_end_ccw; 2224 p_endccw=privptr->p_end_ccw;
@@ -2276,7 +2276,7 @@ init_ccw_bk(struct net_device *dev)
2276 2276
2277#ifdef IOTRACE 2277#ifdef IOTRACE
2278 printk(KERN_INFO "%s: %s() dump claw ending CCW BK \n", 2278 printk(KERN_INFO "%s: %s() dump claw ending CCW BK \n",
2279 dev->name,__FUNCTION__); 2279 dev->name,__func__);
2280 dumpit((char *)p_endccw, sizeof(struct endccw)); 2280 dumpit((char *)p_endccw, sizeof(struct endccw));
2281#endif 2281#endif
2282 2282
@@ -2287,7 +2287,7 @@ init_ccw_bk(struct net_device *dev)
2287 2287
2288#ifdef DEBUGMSG 2288#ifdef DEBUGMSG
2289 printk(KERN_INFO "%s: %s() Begin build a chain of CCW buffer \n", 2289 printk(KERN_INFO "%s: %s() Begin build a chain of CCW buffer \n",
2290 dev->name,__FUNCTION__); 2290 dev->name,__func__);
 #endif
 	p_buff=privptr->p_buff_ccw;
 
@@ -2306,7 +2306,7 @@ init_ccw_bk(struct net_device *dev)
 #ifdef DEBUGMSG
 	printk(KERN_INFO "%s: %s() "
 		"End build a chain of CCW buffer \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 	p_buf=p_free_chain;
 	while (p_buf!=NULL) {
 		dumpit((char *)p_buf, sizeof(struct ccwbk));
@@ -2321,7 +2321,7 @@ init_ccw_bk(struct net_device *dev)
 #ifdef DEBUGMSG
 	printk(KERN_INFO "%s: %s() "
 		"Begin initialize ClawSignalBlock \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 #endif
 	if (privptr->p_claw_signal_blk==NULL) {
 		privptr->p_claw_signal_blk=p_free_chain;
@@ -2334,7 +2334,7 @@ init_ccw_bk(struct net_device *dev)
 #ifdef DEBUGMSG
 	printk(KERN_INFO "%s: %s() > End initialize "
 		"ClawSignalBlock\n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 	dumpit((char *)privptr->p_claw_signal_blk, sizeof(struct ccwbk));
 #endif
 
@@ -2349,14 +2349,14 @@ init_ccw_bk(struct net_device *dev)
 	if (privptr->p_buff_write==NULL) {
 		printk(KERN_INFO "%s: %s() __get_free_pages for write"
 			" bufs failed : get is for %d pages\n",
-			dev->name,__FUNCTION__,claw_write_pages );
+			dev->name,__func__,claw_write_pages );
 		free_pages((unsigned long)privptr->p_buff_ccw,
 			(int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
 		privptr->p_buff_ccw=NULL;
 #ifdef FUNCTRACE
 		printk(KERN_INFO "%s: %s() > exit on line %d,"
 			"rc = ENOMEM\n",
-			dev->name,__FUNCTION__,__LINE__);
+			dev->name,__func__,__LINE__);
 #endif
 		return -ENOMEM;
 	}
@@ -2369,7 +2369,7 @@ init_ccw_bk(struct net_device *dev)
 		ccw_pages_required * PAGE_SIZE);
 #ifdef DEBUGMSG
 	printk(KERN_INFO "%s: %s() Begin build claw write free "
-		"chain \n",dev->name,__FUNCTION__);
+		"chain \n",dev->name,__func__);
 #endif
 	privptr->p_write_free_chain=NULL;
 
@@ -2409,14 +2409,14 @@ init_ccw_bk(struct net_device *dev)
 #ifdef IOTRACE
 		printk(KERN_INFO "%s:%s __get_free_pages "
 			"for writes buf: get for %d pages\n",
-			dev->name,__FUNCTION__,
+			dev->name,__func__,
 			privptr->p_buff_pages_perwrite);
 #endif
 		if (p_buff==NULL) {
 			printk(KERN_INFO "%s:%s __get_free_pages "
 				"for writes buf failed : get is for %d pages\n",
 				dev->name,
-				__FUNCTION__,
+				__func__,
 				privptr->p_buff_pages_perwrite );
 			free_pages((unsigned long)privptr->p_buff_ccw,
 				(int)pages_to_order_of_mag(
@@ -2433,7 +2433,7 @@ init_ccw_bk(struct net_device *dev)
 #ifdef FUNCTRACE
 			printk(KERN_INFO "%s: %s exit on line %d, rc = ENOMEM\n",
 				dev->name,
-				__FUNCTION__,
+				__func__,
 				__LINE__);
 #endif
 			return -ENOMEM;
@@ -2466,7 +2466,7 @@ init_ccw_bk(struct net_device *dev)
 
 #ifdef DEBUGMSG
 	printk(KERN_INFO "%s:%s End build claw write free chain \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 	p_buf=privptr->p_write_free_chain;
 	while (p_buf!=NULL) {
 		dumpit((char *)p_buf, sizeof(struct ccwbk));
@@ -2485,7 +2485,7 @@ init_ccw_bk(struct net_device *dev)
 		printk(KERN_INFO "%s: %s() "
 			"__get_free_pages for read buf failed : "
 			"get is for %d pages\n",
-			dev->name,__FUNCTION__,claw_read_pages );
+			dev->name,__func__,claw_read_pages );
 		free_pages((unsigned long)privptr->p_buff_ccw,
 			(int)pages_to_order_of_mag(
 				privptr->p_buff_ccw_num));
@@ -2497,7 +2497,7 @@ init_ccw_bk(struct net_device *dev)
 		privptr->p_buff_write=NULL;
 #ifdef FUNCTRACE
 		printk(KERN_INFO "%s: %s() > exit on line %d, rc ="
-			" ENOMEM\n",dev->name,__FUNCTION__,__LINE__);
+			" ENOMEM\n",dev->name,__func__,__LINE__);
 #endif
 		return -ENOMEM;
 	}
@@ -2509,7 +2509,7 @@ init_ccw_bk(struct net_device *dev)
 	*/
 #ifdef DEBUGMSG
 	printk(KERN_INFO "%s: %s() Begin build claw read free chain \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 #endif
 	p_buff=privptr->p_buff_read;
 	for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
@@ -2590,7 +2590,7 @@ init_ccw_bk(struct net_device *dev)
 
 #ifdef DEBUGMSG
 	printk(KERN_INFO "%s: %s() Begin build claw read free chain \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 #endif
 	for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
 		p_buff = (void *)__get_free_pages(__GFP_DMA,
@@ -2598,7 +2598,7 @@ init_ccw_bk(struct net_device *dev)
 		if (p_buff==NULL) {
 			printk(KERN_INFO "%s: %s() __get_free_pages for read "
 				"buf failed : get is for %d pages\n",
-				dev->name,__FUNCTION__,
+				dev->name,__func__,
 				privptr->p_buff_pages_perread );
 			free_pages((unsigned long)privptr->p_buff_ccw,
 				(int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
@@ -2622,7 +2622,7 @@ init_ccw_bk(struct net_device *dev)
 			privptr->p_buff_write=NULL;
 #ifdef FUNCTRACE
 			printk(KERN_INFO "%s: %s() exit on line %d, rc = ENOMEM\n",
-				dev->name,__FUNCTION__,
+				dev->name,__func__,
 				__LINE__);
 #endif
 			return -ENOMEM;
@@ -2695,7 +2695,7 @@ init_ccw_bk(struct net_device *dev)
 	} /* pBuffread = NULL */
 #ifdef DEBUGMSG
 	printk(KERN_INFO "%s: %s() > End build claw read free chain \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 	p_buf=p_first_CCWB;
 	while (p_buf!=NULL) {
 		dumpit((char *)p_buf, sizeof(struct ccwbk));
@@ -2707,7 +2707,7 @@ init_ccw_bk(struct net_device *dev)
 	privptr->buffs_alloc = 1;
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s: %s() exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 	return 0;
 } /* end of init_ccw_bk */
@@ -2723,11 +2723,11 @@ probe_error( struct ccwgroup_device *cgdev)
 {
 	struct claw_privbk *privptr;
 #ifdef FUNCTRACE
-	printk(KERN_INFO "%s enter \n",__FUNCTION__);
+	printk(KERN_INFO "%s enter \n",__func__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"proberr");
 #ifdef DEBUGMSG
-	printk(KERN_INFO "%s variable cgdev =\n",__FUNCTION__);
+	printk(KERN_INFO "%s variable cgdev =\n",__func__);
 	dumpit((char *) cgdev, sizeof(struct ccwgroup_device));
 #endif
 	privptr=(struct claw_privbk *)cgdev->dev.driver_data;
@@ -2741,7 +2741,7 @@ probe_error( struct ccwgroup_device *cgdev)
 	}
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s > exit on line %d\n",
-		__FUNCTION__,__LINE__);
+		__func__,__LINE__);
 #endif
 
 	return;
@@ -2772,7 +2772,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
 	struct chbk *p_ch = NULL;
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s: %s() > enter \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"clw_cntl");
 #ifdef DEBUGMSG
@@ -2794,7 +2794,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
 #ifdef FUNCTRACE
 		printk(KERN_INFO "%s: %s() > "
 			"exit on line %d, rc=0\n",
-			dev->name,__FUNCTION__,__LINE__);
+			dev->name,__func__,__LINE__);
 #endif
 		return 0;
 	}
@@ -3057,7 +3057,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
 
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s: %s() exit on line %d, rc = 0\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 
 	return 0;
@@ -3080,7 +3080,7 @@ claw_send_control(struct net_device *dev, __u8 type, __u8 link,
 	struct sk_buff *skb;
 
 #ifdef FUNCTRACE
-	printk(KERN_INFO "%s:%s > enter \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s > enter \n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"sndcntl");
 #ifdef DEBUGMSG
@@ -3143,10 +3143,10 @@ claw_send_control(struct net_device *dev, __u8 type, __u8 link,
 	skb = dev_alloc_skb(sizeof(struct clawctl));
 	if (!skb) {
 		printk( "%s:%s low on mem, returning...\n",
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
 #ifdef DEBUG
 		printk(KERN_INFO "%s:%s Exit, rc = ENOMEM\n",
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
 #endif
 		return -ENOMEM;
 	}
@@ -3162,7 +3162,7 @@ claw_send_control(struct net_device *dev, __u8 type, __u8 link,
 	claw_hw_tx(skb, dev, 0);
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 
 	return 0;
@@ -3180,7 +3180,7 @@ claw_snd_conn_req(struct net_device *dev, __u8 link)
 	struct clawctl *p_ctl;
 
 #ifdef FUNCTRACE
-	printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"snd_conn");
 #ifdef DEBUGMSG
@@ -3193,7 +3193,7 @@ claw_snd_conn_req(struct net_device *dev, __u8 link)
 	if ( privptr->system_validate_comp==0x00 ) {
 #ifdef FUNCTRACE
 		printk(KERN_INFO "%s:%s Exit on line %d, rc = 1\n",
-			dev->name,__FUNCTION__,__LINE__);
+			dev->name,__func__,__LINE__);
 #endif
 		return rc;
 	}
@@ -3209,7 +3209,7 @@ claw_snd_conn_req(struct net_device *dev, __u8 link)
 		HOST_APPL_NAME, privptr->p_env->api_type);
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
-		dev->name,__FUNCTION__,__LINE__, rc);
+		dev->name,__func__,__LINE__, rc);
 #endif
 	return rc;
 
@@ -3228,7 +3228,7 @@ claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
 	struct conncmd * p_connect;
 
 #ifdef FUNCTRACE
-	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"snd_dsc");
 #ifdef DEBUGMSG
@@ -3244,7 +3244,7 @@ claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
 		p_connect->host_name, p_connect->WS_name);
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
-		dev->name,__FUNCTION__, __LINE__, rc);
+		dev->name,__func__, __LINE__, rc);
 #endif
 	return rc;
 } /* end of claw_snd_disc */
@@ -3265,7 +3265,7 @@ claw_snd_sys_validate_rsp(struct net_device *dev,
 
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Enter\n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"chkresp");
 #ifdef DEBUGMSG
@@ -3285,7 +3285,7 @@ claw_snd_sys_validate_rsp(struct net_device *dev,
 		p_env->adapter_name );
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
-		dev->name,__FUNCTION__,__LINE__, rc);
+		dev->name,__func__,__LINE__, rc);
 #endif
 	return rc;
 } /* end of claw_snd_sys_validate_rsp */
@@ -3301,7 +3301,7 @@ claw_strt_conn_req(struct net_device *dev )
 	int rc;
 
 #ifdef FUNCTRACE
-	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"conn_req");
 #ifdef DEBUGMSG
@@ -3311,7 +3311,7 @@ claw_strt_conn_req(struct net_device *dev )
 	rc=claw_snd_conn_req(dev, 1);
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
-		dev->name,__FUNCTION__,__LINE__, rc);
+		dev->name,__func__,__LINE__, rc);
 #endif
 	return rc;
 } /* end of claw_strt_conn_req */
@@ -3327,13 +3327,13 @@ net_device_stats *claw_stats(struct net_device *dev)
 {
 	struct claw_privbk *privptr;
 #ifdef FUNCTRACE
-	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"stats");
 	privptr = dev->priv;
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 	return &privptr->stats;
 } /* end of claw_stats */
@@ -3366,7 +3366,7 @@ unpack_read(struct net_device *dev )
 	int p=0;
 
 #ifdef FUNCTRACE
-	printk(KERN_INFO "%s:%s enter \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s enter \n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"unpkread");
 	p_first_ccw=NULL;
@@ -3408,7 +3408,7 @@ unpack_read(struct net_device *dev )
 	if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
 #ifdef DEBUGMSG
 		printk(KERN_INFO "%s: %s > More_to_come is ON\n",
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
 #endif
 		mtc_this_frm=1;
 		if (p_this_ccw->header.length!=
@@ -3435,7 +3435,7 @@ unpack_read(struct net_device *dev )
 #ifdef DEBUGMSG
 			printk(KERN_INFO "%s:%s goto next "
 				"frame from MoretoComeSkip \n",
-				dev->name,__FUNCTION__);
+				dev->name,__func__);
 #endif
 			goto NextFrame;
 		}
@@ -3445,7 +3445,7 @@ unpack_read(struct net_device *dev )
 #ifdef DEBUGMSG
 		printk(KERN_INFO "%s:%s goto next "
 			"frame from claw_process_control \n",
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
 #endif
 		CLAW_DBF_TEXT(4,trace,"UnpkCntl");
 		goto NextFrame;
@@ -3468,7 +3468,7 @@ unpack_next:
 	if (privptr->mtc_logical_link<0) {
 #ifdef DEBUGMSG
 		printk(KERN_INFO "%s: %s mtc_logical_link < 0 \n",
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
 #endif
 
 		/*
@@ -3487,7 +3487,7 @@ unpack_next:
 		printk(KERN_INFO "%s: %s > goto next "
 			"frame from MoretoComeSkip \n",
 			dev->name,
-			__FUNCTION__);
+			__func__);
 		printk(KERN_INFO " bytes_to_mov %d > (MAX_ENVELOPE_"
 			"SIZE-privptr->mtc_offset %d)\n",
 			bytes_to_mov,(MAX_ENVELOPE_SIZE- privptr->mtc_offset));
@@ -3505,13 +3505,13 @@ unpack_next:
 	}
 #ifdef DEBUGMSG
 	printk(KERN_INFO "%s: %s() received data \n",
-		dev->name,__FUNCTION__);
+		dev->name,__func__);
 	if (p_env->packing == DO_PACKED)
 		dumpit((char *)p_packd+sizeof(struct clawph),32);
 	else
 		dumpit((char *)p_this_ccw->p_buffer, 32);
 	printk(KERN_INFO "%s: %s() bytelength %d \n",
-		dev->name,__FUNCTION__,bytes_to_mov);
+		dev->name,__func__,bytes_to_mov);
 #endif
 	if (mtc_this_frm==0) {
 		len_of_data=privptr->mtc_offset+bytes_to_mov;
@@ -3530,13 +3530,13 @@ unpack_next:
 #ifdef DEBUGMSG
 			printk(KERN_INFO "%s: %s() netif_"
 				"rx(skb) completed \n",
-				dev->name,__FUNCTION__);
+				dev->name,__func__);
 #endif
 		}
 		else {
 			privptr->stats.rx_dropped++;
 			printk(KERN_WARNING "%s: %s() low on memory\n",
-				dev->name,__FUNCTION__);
+				dev->name,__func__);
 		}
 		privptr->mtc_offset=0;
 		privptr->mtc_logical_link=-1;
@@ -3575,10 +3575,10 @@ NextFrame:
 
 #ifdef IOTRACE
 	printk(KERN_INFO "%s:%s processed frame is %d \n",
-		dev->name,__FUNCTION__,i);
+		dev->name,__func__,i);
 	printk(KERN_INFO "%s:%s F:%lx L:%lx\n",
 		dev->name,
-		__FUNCTION__,
+		__func__,
 		(unsigned long)p_first_ccw,
 		(unsigned long)p_last_ccw);
 #endif
@@ -3588,7 +3588,7 @@ NextFrame:
 	claw_strt_read(dev, LOCK_YES);
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s: %s exit on line %d\n",
-		dev->name, __FUNCTION__, __LINE__);
+		dev->name, __func__, __LINE__);
 #endif
 	return;
 } /* end of unpack_read */
@@ -3610,7 +3610,7 @@ claw_strt_read (struct net_device *dev, int lock )
 	p_ch=&privptr->channel[READ];
 
 #ifdef FUNCTRACE
-	printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__);
 	printk(KERN_INFO "%s: variable lock = %d, dev =\n",dev->name, lock);
 	dumpit((char *) dev, sizeof(struct net_device));
 #endif
@@ -3626,7 +3626,7 @@ claw_strt_read (struct net_device *dev, int lock )
 	}
 #ifdef DEBUGMSG
 	printk(KERN_INFO "%s:%s state-%02x\n" ,
-		dev->name,__FUNCTION__, p_ch->claw_state);
+		dev->name,__func__, p_ch->claw_state);
 #endif
 	if (lock==LOCK_YES) {
 		spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
@@ -3634,7 +3634,7 @@ claw_strt_read (struct net_device *dev, int lock )
 	if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
 #ifdef DEBUGMSG
 		printk(KERN_INFO "%s: HOT READ started in %s\n" ,
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
 		p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
 		dumpit((char *)&p_clawh->flag , 1);
 #endif
@@ -3650,7 +3650,7 @@ claw_strt_read (struct net_device *dev, int lock )
 	else {
 #ifdef DEBUGMSG
 		printk(KERN_INFO "%s: No READ started by %s() In progress\n" ,
-			dev->name,__FUNCTION__);
+			dev->name,__func__);
 #endif
 		CLAW_DBF_TEXT(2,trace,"ReadAct");
 	}
@@ -3660,7 +3660,7 @@ claw_strt_read (struct net_device *dev, int lock )
 	}
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 	CLAW_DBF_TEXT(4,trace,"StRdExit");
 	return;
@@ -3681,7 +3681,7 @@ claw_strt_out_IO( struct net_device *dev )
 	struct ccwbk *p_first_ccw;
 
 #ifdef FUNCTRACE
-	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
 #endif
 	if (!dev) {
 		return;
@@ -3691,7 +3691,7 @@ claw_strt_out_IO( struct net_device *dev )
 
 #ifdef DEBUGMSG
 	printk(KERN_INFO "%s:%s state-%02x\n" ,
-		dev->name,__FUNCTION__,p_ch->claw_state);
+		dev->name,__func__,p_ch->claw_state);
 #endif
 	CLAW_DBF_TEXT(4,trace,"strt_io");
 	p_first_ccw=privptr->p_write_active_first;
@@ -3701,14 +3701,14 @@ claw_strt_out_IO( struct net_device *dev )
 	if (p_first_ccw == NULL) {
 #ifdef FUNCTRACE
 		printk(KERN_INFO "%s:%s Exit on line %d\n",
-			dev->name,__FUNCTION__,__LINE__);
+			dev->name,__func__,__LINE__);
 #endif
 		return;
 	}
 	if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
 		parm = (unsigned long) p_ch;
 #ifdef DEBUGMSG
-		printk(KERN_INFO "%s:%s do_io \n" ,dev->name,__FUNCTION__);
+		printk(KERN_INFO "%s:%s do_io \n" ,dev->name,__func__);
 		dumpit((char *)p_first_ccw, sizeof(struct ccwbk));
 #endif
 		CLAW_DBF_TEXT(2,trace,"StWrtIO");
@@ -3721,7 +3721,7 @@ claw_strt_out_IO( struct net_device *dev )
 	dev->trans_start = jiffies;
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d\n",
-		dev->name,__FUNCTION__,__LINE__);
+		dev->name,__func__,__LINE__);
 #endif
 
 	return;
@@ -3745,7 +3745,7 @@ claw_free_wrt_buf( struct net_device *dev )
 	struct ccwbk*p_buf;
 #endif
 #ifdef FUNCTRACE
-	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
 	printk(KERN_INFO "%s: free count = %d variable dev =\n",
 		dev->name,privptr->write_free_count);
 #endif
@@ -3798,7 +3798,7 @@ claw_free_wrt_buf( struct net_device *dev )
 		privptr->p_write_active_last=NULL;
 #ifdef DEBUGMSG
 		printk(KERN_INFO "%s:%s p_write_"
-			"active_first==NULL\n",dev->name,__FUNCTION__);
+			"active_first==NULL\n",dev->name,__func__);
 #endif
 	}
 #ifdef IOTRACE
@@ -3819,7 +3819,7 @@ claw_free_wrt_buf( struct net_device *dev )
 	CLAW_DBF_TEXT_(4,trace,"FWC=%d",privptr->write_free_count);
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d free_count =%d\n",
-		dev->name,__FUNCTION__, __LINE__,privptr->write_free_count);
+		dev->name,__func__, __LINE__,privptr->write_free_count);
 #endif
 	return;
 }
@@ -3833,7 +3833,7 @@ claw_free_netdevice(struct net_device * dev, int free_dev)
 {
 	struct claw_privbk *privptr;
 #ifdef FUNCTRACE
-	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"free_dev");
 
@@ -3854,7 +3854,7 @@ claw_free_netdevice(struct net_device * dev, int free_dev)
 #endif
 	CLAW_DBF_TEXT(2,setup,"feee_ok");
 #ifdef FUNCTRACE
-	printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Exit\n",dev->name,__func__);
 #endif
 }
 
@@ -3867,13 +3867,13 @@ static void
 claw_init_netdevice(struct net_device * dev)
 {
 #ifdef FUNCTRACE
-	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"init_dev");
 	CLAW_DBF_TEXT_(2,setup,"%s",dev->name);
 	if (!dev) {
 		printk(KERN_WARNING "claw:%s BAD Device exit line %d\n",
-			__FUNCTION__,__LINE__);
+			__func__,__LINE__);
 		CLAW_DBF_TEXT(2,setup,"baddev");
 		return;
 	}
@@ -3889,7 +3889,7 @@ claw_init_netdevice(struct net_device * dev)
 	dev->tx_queue_len = 1300;
 	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
 #ifdef FUNCTRACE
-	printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Exit\n",dev->name,__func__);
 #endif
 	CLAW_DBF_TEXT(2,setup,"initok");
 	return;
@@ -3909,7 +3909,7 @@ add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
 	struct ccw_dev_id dev_id;
 
 #ifdef FUNCTRACE
-	printk(KERN_INFO "%s:%s Enter\n",cdev->dev.bus_id,__FUNCTION__);
+	printk(KERN_INFO "%s:%s Enter\n",cdev->dev.bus_id,__func__);
 #endif
 	CLAW_DBF_TEXT_(2,setup,"%s",cdev->dev.bus_id);
 	privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
@@ -3920,16 +3920,16 @@ add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
 	p_ch->devno = dev_id.devno;
 	if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
 		printk(KERN_WARNING "%s Out of memory in %s for irb\n",
-			p_ch->id,__FUNCTION__);
+			p_ch->id,__func__);
 #ifdef FUNCTRACE
 		printk(KERN_INFO "%s:%s Exit on line %d\n",
-			p_ch->id,__FUNCTION__,__LINE__);
+			p_ch->id,__func__,__LINE__);
 #endif
 		return -ENOMEM;
 	}
 #ifdef FUNCTRACE
 	printk(KERN_INFO "%s:%s Exit on line %d\n",
-		cdev->dev.bus_id,__FUNCTION__,__LINE__);
+		cdev->dev.bus_id,__func__,__LINE__);
 #endif
 	return 0;
 }
@@ -3952,7 +3952,7 @@ claw_new_device(struct ccwgroup_device *cgdev)
 	int ret;
 	struct ccw_dev_id dev_id;
 
-	pr_debug("%s() called\n", __FUNCTION__);
+	pr_debug("%s() called\n", __func__);
 	printk(KERN_INFO "claw: add for %s\n",cgdev->cdev[READ]->dev.bus_id);
 	CLAW_DBF_TEXT(2,setup,"new_dev");
 	privptr = cgdev->dev.driver_data;
@@ -3990,7 +3990,7 @@ claw_new_device(struct ccwgroup_device *cgdev)
 	}
 	dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
 	if (!dev) {
-		printk(KERN_WARNING "%s:alloc_netdev failed\n",__FUNCTION__);
+		printk(KERN_WARNING "%s:alloc_netdev failed\n",__func__);
 		goto out;
 	}
 	dev->priv = privptr;
@@ -4065,7 +4065,7 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
 	struct net_device *ndev;
 	int ret;
 
-	pr_debug("%s() called\n", __FUNCTION__);
+	pr_debug("%s() called\n", __func__);
 	CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id);
 	priv = cgdev->dev.driver_data;
 	if (!priv)
@@ -4095,15 +4095,15 @@ claw_remove_device(struct ccwgroup_device *cgdev)
 {
 	struct claw_privbk *priv;
 
-	pr_debug("%s() called\n", __FUNCTION__);
+	pr_debug("%s() called\n", __func__);
 	CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id);
 	priv = cgdev->dev.driver_data;
 	if (!priv) {
-		printk(KERN_WARNING "claw: %s() no Priv exiting\n",__FUNCTION__);
+		printk(KERN_WARNING "claw: %s() no Priv exiting\n",__func__);
 		return;
 	}
 	printk(KERN_INFO "claw: %s() called %s will be removed.\n",
-		__FUNCTION__,cgdev->cdev[0]->dev.bus_id);
+		__func__,cgdev->cdev[0]->dev.bus_id);
 	if (cgdev->state == CCWGROUP_ONLINE)
 		claw_shutdown_device(cgdev);
 	claw_remove_files(&cgdev->dev);
@@ -4346,7 +4346,7 @@ static struct attribute_group claw_attr_group = {
 static int
 claw_add_files(struct device *dev)
 {
-	pr_debug("%s() called\n", __FUNCTION__);
+	pr_debug("%s() called\n", __func__);
 	CLAW_DBF_TEXT(2,setup,"add_file");
 	return sysfs_create_group(&dev->kobj, &claw_attr_group);
 }
@@ -4354,7 +4354,7 @@ claw_add_files(struct device *dev)
 static void
 claw_remove_files(struct device *dev)
 {
-	pr_debug("%s() called\n", __FUNCTION__);
+	pr_debug("%s() called\n", __func__);
 	CLAW_DBF_TEXT(2,setup,"rem_file");
 	sysfs_remove_group(&dev->kobj, &claw_attr_group);
 }
@@ -4385,12 +4385,12 @@ claw_init(void)
 	printk(KERN_INFO "claw: starting driver\n");
 
 #ifdef FUNCTRACE
-	printk(KERN_INFO "claw: %s() enter \n",__FUNCTION__);
+	printk(KERN_INFO "claw: %s() enter \n",__func__);
 #endif
 	ret = claw_register_debug_facility();
 	if (ret) {
 		printk(KERN_WARNING "claw: %s() debug_register failed %d\n",
-			__FUNCTION__,ret);
+			__func__,ret);
 		return ret;
 	}
 	CLAW_DBF_TEXT(2,setup,"init_mod");
@@ -4398,10 +4398,10 @@ claw_init(void)
 	if (ret) {
 		claw_unregister_debug_facility();
 		printk(KERN_WARNING "claw; %s() cu3088 register failed %d\n",
-			__FUNCTION__,ret);
+			__func__,ret);
 	}
 #ifdef FUNCTRACE
-	printk(KERN_INFO "claw: %s() exit \n",__FUNCTION__);
+	printk(KERN_INFO "claw: %s() exit \n",__func__);
 #endif
 	return ret;
 }
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 874a19994489..8f876f6ab367 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -670,7 +670,7 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg)
670 struct netiucv_priv *privptr = netdev_priv(conn->netdev); 670 struct netiucv_priv *privptr = netdev_priv(conn->netdev);
671 int rc; 671 int rc;
672 672
673 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 673 IUCV_DBF_TEXT(trace, 4, __func__);
674 674
675 if (!conn->netdev) { 675 if (!conn->netdev) {
676 iucv_message_reject(conn->path, msg); 676 iucv_message_reject(conn->path, msg);
@@ -718,7 +718,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
718 struct ll_header header; 718 struct ll_header header;
719 int rc; 719 int rc;
720 720
721 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 721 IUCV_DBF_TEXT(trace, 4, __func__);
722 722
723 if (conn && conn->netdev) 723 if (conn && conn->netdev)
724 privptr = netdev_priv(conn->netdev); 724 privptr = netdev_priv(conn->netdev);
@@ -799,7 +799,7 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
799 struct netiucv_priv *privptr = netdev_priv(netdev); 799 struct netiucv_priv *privptr = netdev_priv(netdev);
800 int rc; 800 int rc;
801 801
802 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 802 IUCV_DBF_TEXT(trace, 3, __func__);
803 803
804 conn->path = path; 804 conn->path = path;
805 path->msglim = NETIUCV_QUEUELEN_DEFAULT; 805 path->msglim = NETIUCV_QUEUELEN_DEFAULT;
@@ -821,7 +821,7 @@ static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
821 struct iucv_event *ev = arg; 821 struct iucv_event *ev = arg;
822 struct iucv_path *path = ev->data; 822 struct iucv_path *path = ev->data;
823 823
824 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 824 IUCV_DBF_TEXT(trace, 3, __func__);
825 iucv_path_sever(path, NULL); 825 iucv_path_sever(path, NULL);
826} 826}
827 827
@@ -831,7 +831,7 @@ static void conn_action_connack(fsm_instance *fi, int event, void *arg)
831 struct net_device *netdev = conn->netdev; 831 struct net_device *netdev = conn->netdev;
832 struct netiucv_priv *privptr = netdev_priv(netdev); 832 struct netiucv_priv *privptr = netdev_priv(netdev);
833 833
834 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 834 IUCV_DBF_TEXT(trace, 3, __func__);
835 fsm_deltimer(&conn->timer); 835 fsm_deltimer(&conn->timer);
836 fsm_newstate(fi, CONN_STATE_IDLE); 836 fsm_newstate(fi, CONN_STATE_IDLE);
837 netdev->tx_queue_len = conn->path->msglim; 837 netdev->tx_queue_len = conn->path->msglim;
@@ -842,7 +842,7 @@ static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
842{ 842{
843 struct iucv_connection *conn = arg; 843 struct iucv_connection *conn = arg;
844 844
845 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 845 IUCV_DBF_TEXT(trace, 3, __func__);
846 fsm_deltimer(&conn->timer); 846 fsm_deltimer(&conn->timer);
847 iucv_path_sever(conn->path, NULL); 847 iucv_path_sever(conn->path, NULL);
848 fsm_newstate(fi, CONN_STATE_STARTWAIT); 848 fsm_newstate(fi, CONN_STATE_STARTWAIT);
@@ -854,7 +854,7 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
854 struct net_device *netdev = conn->netdev; 854 struct net_device *netdev = conn->netdev;
855 struct netiucv_priv *privptr = netdev_priv(netdev); 855 struct netiucv_priv *privptr = netdev_priv(netdev);
856 856
857 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 857 IUCV_DBF_TEXT(trace, 3, __func__);
858 858
859 fsm_deltimer(&conn->timer); 859 fsm_deltimer(&conn->timer);
860 iucv_path_sever(conn->path, NULL); 860 iucv_path_sever(conn->path, NULL);
@@ -870,7 +870,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
870 struct iucv_connection *conn = arg; 870 struct iucv_connection *conn = arg;
871 int rc; 871 int rc;
872 872
873 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 873 IUCV_DBF_TEXT(trace, 3, __func__);
874 874
875 fsm_newstate(fi, CONN_STATE_STARTWAIT); 875 fsm_newstate(fi, CONN_STATE_STARTWAIT);
876 PRINT_DEBUG("%s('%s'): connecting ...\n", 876 PRINT_DEBUG("%s('%s'): connecting ...\n",
@@ -948,7 +948,7 @@ static void conn_action_stop(fsm_instance *fi, int event, void *arg)
948 struct net_device *netdev = conn->netdev; 948 struct net_device *netdev = conn->netdev;
949 struct netiucv_priv *privptr = netdev_priv(netdev); 949 struct netiucv_priv *privptr = netdev_priv(netdev);
950 950
951 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 951 IUCV_DBF_TEXT(trace, 3, __func__);
952 952
953 fsm_deltimer(&conn->timer); 953 fsm_deltimer(&conn->timer);
954 fsm_newstate(fi, CONN_STATE_STOPPED); 954 fsm_newstate(fi, CONN_STATE_STOPPED);
@@ -1024,7 +1024,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg)
1024 struct net_device *dev = arg; 1024 struct net_device *dev = arg;
1025 struct netiucv_priv *privptr = netdev_priv(dev); 1025 struct netiucv_priv *privptr = netdev_priv(dev);
1026 1026
1027 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1027 IUCV_DBF_TEXT(trace, 3, __func__);
1028 1028
1029 fsm_newstate(fi, DEV_STATE_STARTWAIT); 1029 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1030 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn); 1030 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
@@ -1044,7 +1044,7 @@ dev_action_stop(fsm_instance *fi, int event, void *arg)
1044 struct netiucv_priv *privptr = netdev_priv(dev); 1044 struct netiucv_priv *privptr = netdev_priv(dev);
1045 struct iucv_event ev; 1045 struct iucv_event ev;
1046 1046
1047 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1047 IUCV_DBF_TEXT(trace, 3, __func__);
1048 1048
1049 ev.conn = privptr->conn; 1049 ev.conn = privptr->conn;
1050 1050
@@ -1066,7 +1066,7 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
1066 struct net_device *dev = arg; 1066 struct net_device *dev = arg;
1067 struct netiucv_priv *privptr = netdev_priv(dev); 1067 struct netiucv_priv *privptr = netdev_priv(dev);
1068 1068
1069 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1069 IUCV_DBF_TEXT(trace, 3, __func__);
1070 1070
1071 switch (fsm_getstate(fi)) { 1071 switch (fsm_getstate(fi)) {
1072 case DEV_STATE_STARTWAIT: 1072 case DEV_STATE_STARTWAIT:
@@ -1097,7 +1097,7 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
1097static void 1097static void
1098dev_action_conndown(fsm_instance *fi, int event, void *arg) 1098dev_action_conndown(fsm_instance *fi, int event, void *arg)
1099{ 1099{
1100 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1100 IUCV_DBF_TEXT(trace, 3, __func__);
1101 1101
1102 switch (fsm_getstate(fi)) { 1102 switch (fsm_getstate(fi)) {
1103 case DEV_STATE_RUNNING: 1103 case DEV_STATE_RUNNING:
@@ -1288,7 +1288,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1288 struct netiucv_priv *privptr = netdev_priv(dev); 1288 struct netiucv_priv *privptr = netdev_priv(dev);
1289 int rc; 1289 int rc;
1290 1290
1291 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1291 IUCV_DBF_TEXT(trace, 4, __func__);
1292 /** 1292 /**
1293 * Some sanity checks ... 1293 * Some sanity checks ...
1294 */ 1294 */
@@ -1344,7 +1344,7 @@ static struct net_device_stats *netiucv_stats (struct net_device * dev)
1344{ 1344{
1345 struct netiucv_priv *priv = netdev_priv(dev); 1345 struct netiucv_priv *priv = netdev_priv(dev);
1346 1346
1347 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1347 IUCV_DBF_TEXT(trace, 5, __func__);
1348 return &priv->stats; 1348 return &priv->stats;
1349} 1349}
1350 1350
@@ -1360,7 +1360,7 @@ static struct net_device_stats *netiucv_stats (struct net_device * dev)
1360 */ 1360 */
1361static int netiucv_change_mtu(struct net_device * dev, int new_mtu) 1361static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1362{ 1362{
1363 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1363 IUCV_DBF_TEXT(trace, 3, __func__);
1364 if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) { 1364 if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1365 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n"); 1365 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1366 return -EINVAL; 1366 return -EINVAL;
@@ -1378,7 +1378,7 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1378{ 1378{
1379 struct netiucv_priv *priv = dev->driver_data; 1379 struct netiucv_priv *priv = dev->driver_data;
1380 1380
1381 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1381 IUCV_DBF_TEXT(trace, 5, __func__);
1382 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid)); 1382 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1383} 1383}
1384 1384
@@ -1393,7 +1393,7 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1393 int i; 1393 int i;
1394 struct iucv_connection *cp; 1394 struct iucv_connection *cp;
1395 1395
1396 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1396 IUCV_DBF_TEXT(trace, 3, __func__);
1397 if (count > 9) { 1397 if (count > 9) {
1398 PRINT_WARN("netiucv: username too long (%d)!\n", (int) count); 1398 PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
1399 IUCV_DBF_TEXT_(setup, 2, 1399 IUCV_DBF_TEXT_(setup, 2,
@@ -1449,7 +1449,7 @@ static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1449 char *buf) 1449 char *buf)
1450{ struct netiucv_priv *priv = dev->driver_data; 1450{ struct netiucv_priv *priv = dev->driver_data;
1451 1451
1452 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1452 IUCV_DBF_TEXT(trace, 5, __func__);
1453 return sprintf(buf, "%d\n", priv->conn->max_buffsize); 1453 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1454} 1454}
1455 1455
@@ -1461,7 +1461,7 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1461 char *e; 1461 char *e;
1462 int bs1; 1462 int bs1;
1463 1463
1464 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1464 IUCV_DBF_TEXT(trace, 3, __func__);
1465 if (count >= 39) 1465 if (count >= 39)
1466 return -EINVAL; 1466 return -EINVAL;
1467 1467
@@ -1513,7 +1513,7 @@ static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1513{ 1513{
1514 struct netiucv_priv *priv = dev->driver_data; 1514 struct netiucv_priv *priv = dev->driver_data;
1515 1515
1516 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1516 IUCV_DBF_TEXT(trace, 5, __func__);
1517 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm)); 1517 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1518} 1518}
1519 1519
@@ -1524,7 +1524,7 @@ static ssize_t conn_fsm_show (struct device *dev,
1524{ 1524{
1525 struct netiucv_priv *priv = dev->driver_data; 1525 struct netiucv_priv *priv = dev->driver_data;
1526 1526
1527 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1527 IUCV_DBF_TEXT(trace, 5, __func__);
1528 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm)); 1528 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1529} 1529}
1530 1530
@@ -1535,7 +1535,7 @@ static ssize_t maxmulti_show (struct device *dev,
1535{ 1535{
1536 struct netiucv_priv *priv = dev->driver_data; 1536 struct netiucv_priv *priv = dev->driver_data;
1537 1537
1538 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1538 IUCV_DBF_TEXT(trace, 5, __func__);
1539 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti); 1539 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1540} 1540}
1541 1541
@@ -1545,7 +1545,7 @@ static ssize_t maxmulti_write (struct device *dev,
1545{ 1545{
1546 struct netiucv_priv *priv = dev->driver_data; 1546 struct netiucv_priv *priv = dev->driver_data;
1547 1547
1548 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1548 IUCV_DBF_TEXT(trace, 4, __func__);
1549 priv->conn->prof.maxmulti = 0; 1549 priv->conn->prof.maxmulti = 0;
1550 return count; 1550 return count;
1551} 1551}
@@ -1557,7 +1557,7 @@ static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1557{ 1557{
1558 struct netiucv_priv *priv = dev->driver_data; 1558 struct netiucv_priv *priv = dev->driver_data;
1559 1559
1560 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1560 IUCV_DBF_TEXT(trace, 5, __func__);
1561 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue); 1561 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1562} 1562}
1563 1563
@@ -1566,7 +1566,7 @@ static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1566{ 1566{
1567 struct netiucv_priv *priv = dev->driver_data; 1567 struct netiucv_priv *priv = dev->driver_data;
1568 1568
1569 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1569 IUCV_DBF_TEXT(trace, 4, __func__);
1570 priv->conn->prof.maxcqueue = 0; 1570 priv->conn->prof.maxcqueue = 0;
1571 return count; 1571 return count;
1572} 1572}
@@ -1578,7 +1578,7 @@ static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1578{ 1578{
1579 struct netiucv_priv *priv = dev->driver_data; 1579 struct netiucv_priv *priv = dev->driver_data;
1580 1580
1581 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1581 IUCV_DBF_TEXT(trace, 5, __func__);
1582 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single); 1582 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1583} 1583}
1584 1584
@@ -1587,7 +1587,7 @@ static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1587{ 1587{
1588 struct netiucv_priv *priv = dev->driver_data; 1588 struct netiucv_priv *priv = dev->driver_data;
1589 1589
1590 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1590 IUCV_DBF_TEXT(trace, 4, __func__);
1591 priv->conn->prof.doios_single = 0; 1591 priv->conn->prof.doios_single = 0;
1592 return count; 1592 return count;
1593} 1593}
@@ -1599,7 +1599,7 @@ static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1599{ 1599{
1600 struct netiucv_priv *priv = dev->driver_data; 1600 struct netiucv_priv *priv = dev->driver_data;
1601 1601
1602 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1602 IUCV_DBF_TEXT(trace, 5, __func__);
1603 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi); 1603 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1604} 1604}
1605 1605
@@ -1608,7 +1608,7 @@ static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1608{ 1608{
1609 struct netiucv_priv *priv = dev->driver_data; 1609 struct netiucv_priv *priv = dev->driver_data;
1610 1610
1611 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1611 IUCV_DBF_TEXT(trace, 5, __func__);
1612 priv->conn->prof.doios_multi = 0; 1612 priv->conn->prof.doios_multi = 0;
1613 return count; 1613 return count;
1614} 1614}
@@ -1620,7 +1620,7 @@ static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1620{ 1620{
1621 struct netiucv_priv *priv = dev->driver_data; 1621 struct netiucv_priv *priv = dev->driver_data;
1622 1622
1623 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1623 IUCV_DBF_TEXT(trace, 5, __func__);
1624 return sprintf(buf, "%ld\n", priv->conn->prof.txlen); 1624 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1625} 1625}
1626 1626
@@ -1629,7 +1629,7 @@ static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1629{ 1629{
1630 struct netiucv_priv *priv = dev->driver_data; 1630 struct netiucv_priv *priv = dev->driver_data;
1631 1631
1632 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1632 IUCV_DBF_TEXT(trace, 4, __func__);
1633 priv->conn->prof.txlen = 0; 1633 priv->conn->prof.txlen = 0;
1634 return count; 1634 return count;
1635} 1635}
@@ -1641,7 +1641,7 @@ static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1641{ 1641{
1642 struct netiucv_priv *priv = dev->driver_data; 1642 struct netiucv_priv *priv = dev->driver_data;
1643 1643
1644 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1644 IUCV_DBF_TEXT(trace, 5, __func__);
1645 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time); 1645 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1646} 1646}
1647 1647
@@ -1650,7 +1650,7 @@ static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1650{ 1650{
1651 struct netiucv_priv *priv = dev->driver_data; 1651 struct netiucv_priv *priv = dev->driver_data;
1652 1652
1653 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1653 IUCV_DBF_TEXT(trace, 4, __func__);
1654 priv->conn->prof.tx_time = 0; 1654 priv->conn->prof.tx_time = 0;
1655 return count; 1655 return count;
1656} 1656}
@@ -1662,7 +1662,7 @@ static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1662{ 1662{
1663 struct netiucv_priv *priv = dev->driver_data; 1663 struct netiucv_priv *priv = dev->driver_data;
1664 1664
1665 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1665 IUCV_DBF_TEXT(trace, 5, __func__);
1666 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending); 1666 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1667} 1667}
1668 1668
@@ -1671,7 +1671,7 @@ static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1671{ 1671{
1672 struct netiucv_priv *priv = dev->driver_data; 1672 struct netiucv_priv *priv = dev->driver_data;
1673 1673
1674 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1674 IUCV_DBF_TEXT(trace, 4, __func__);
1675 priv->conn->prof.tx_pending = 0; 1675 priv->conn->prof.tx_pending = 0;
1676 return count; 1676 return count;
1677} 1677}
@@ -1683,7 +1683,7 @@ static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1683{ 1683{
1684 struct netiucv_priv *priv = dev->driver_data; 1684 struct netiucv_priv *priv = dev->driver_data;
1685 1685
1686 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1686 IUCV_DBF_TEXT(trace, 5, __func__);
1687 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending); 1687 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1688} 1688}
1689 1689
@@ -1692,7 +1692,7 @@ static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1692{ 1692{
1693 struct netiucv_priv *priv = dev->driver_data; 1693 struct netiucv_priv *priv = dev->driver_data;
1694 1694
1695 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1695 IUCV_DBF_TEXT(trace, 4, __func__);
1696 priv->conn->prof.tx_max_pending = 0; 1696 priv->conn->prof.tx_max_pending = 0;
1697 return count; 1697 return count;
1698} 1698}
@@ -1732,7 +1732,7 @@ static int netiucv_add_files(struct device *dev)
1732{ 1732{
1733 int ret; 1733 int ret;
1734 1734
1735 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1735 IUCV_DBF_TEXT(trace, 3, __func__);
1736 ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group); 1736 ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
1737 if (ret) 1737 if (ret)
1738 return ret; 1738 return ret;
@@ -1744,7 +1744,7 @@ static int netiucv_add_files(struct device *dev)
1744 1744
1745static void netiucv_remove_files(struct device *dev) 1745static void netiucv_remove_files(struct device *dev)
1746{ 1746{
1747 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1747 IUCV_DBF_TEXT(trace, 3, __func__);
1748 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group); 1748 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1749 sysfs_remove_group(&dev->kobj, &netiucv_attr_group); 1749 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1750} 1750}
@@ -1756,7 +1756,7 @@ static int netiucv_register_device(struct net_device *ndev)
1756 int ret; 1756 int ret;
1757 1757
1758 1758
1759 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1759 IUCV_DBF_TEXT(trace, 3, __func__);
1760 1760
1761 if (dev) { 1761 if (dev) {
1762 snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name); 1762 snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
@@ -1792,7 +1792,7 @@ out_unreg:
1792 1792
1793static void netiucv_unregister_device(struct device *dev) 1793static void netiucv_unregister_device(struct device *dev)
1794{ 1794{
1795 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1795 IUCV_DBF_TEXT(trace, 3, __func__);
1796 netiucv_remove_files(dev); 1796 netiucv_remove_files(dev);
1797 device_unregister(dev); 1797 device_unregister(dev);
1798} 1798}
@@ -1857,7 +1857,7 @@ out:
1857 */ 1857 */
1858static void netiucv_remove_connection(struct iucv_connection *conn) 1858static void netiucv_remove_connection(struct iucv_connection *conn)
1859{ 1859{
1860 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1860 IUCV_DBF_TEXT(trace, 3, __func__);
1861 write_lock_bh(&iucv_connection_rwlock); 1861 write_lock_bh(&iucv_connection_rwlock);
1862 list_del_init(&conn->list); 1862 list_del_init(&conn->list);
1863 write_unlock_bh(&iucv_connection_rwlock); 1863 write_unlock_bh(&iucv_connection_rwlock);
@@ -1881,7 +1881,7 @@ static void netiucv_free_netdevice(struct net_device *dev)
1881{ 1881{
1882 struct netiucv_priv *privptr = netdev_priv(dev); 1882 struct netiucv_priv *privptr = netdev_priv(dev);
1883 1883
1884 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1884 IUCV_DBF_TEXT(trace, 3, __func__);
1885 1885
1886 if (!dev) 1886 if (!dev)
1887 return; 1887 return;
@@ -1963,7 +1963,7 @@ static ssize_t conn_write(struct device_driver *drv,
1963 struct netiucv_priv *priv; 1963 struct netiucv_priv *priv;
1964 struct iucv_connection *cp; 1964 struct iucv_connection *cp;
1965 1965
1966 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1966 IUCV_DBF_TEXT(trace, 3, __func__);
1967 if (count>9) { 1967 if (count>9) {
1968 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count); 1968 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
1969 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); 1969 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
@@ -2048,7 +2048,7 @@ static ssize_t remove_write (struct device_driver *drv,
2048 const char *p; 2048 const char *p;
2049 int i; 2049 int i;
2050 2050
2051 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 2051 IUCV_DBF_TEXT(trace, 3, __func__);
2052 2052
2053 if (count >= IFNAMSIZ) 2053 if (count >= IFNAMSIZ)
2054 count = IFNAMSIZ - 1;; 2054 count = IFNAMSIZ - 1;;
@@ -2116,7 +2116,7 @@ static void __exit netiucv_exit(void)
2116 struct netiucv_priv *priv; 2116 struct netiucv_priv *priv;
2117 struct device *dev; 2117 struct device *dev;
2118 2118
2119 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 2119 IUCV_DBF_TEXT(trace, 3, __func__);
2120 while (!list_empty(&iucv_connection_list)) { 2120 while (!list_empty(&iucv_connection_list)) {
2121 cp = list_entry(iucv_connection_list.next, 2121 cp = list_entry(iucv_connection_list.next,
2122 struct iucv_connection, list); 2122 struct iucv_connection, list);
@@ -2146,8 +2146,7 @@ static int __init netiucv_init(void)
2146 rc = iucv_register(&netiucv_handler, 1); 2146 rc = iucv_register(&netiucv_handler, 1);
2147 if (rc) 2147 if (rc)
2148 goto out_dbf; 2148 goto out_dbf;
2149 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 2149 IUCV_DBF_TEXT(trace, 3, __func__);
2150 netiucv_driver.groups = netiucv_drv_attr_groups;
2151 rc = driver_register(&netiucv_driver); 2150 rc = driver_register(&netiucv_driver);
2152 if (rc) { 2151 if (rc) {
2153 PRINT_ERR("NETIUCV: failed to register driver.\n"); 2152 PRINT_ERR("NETIUCV: failed to register driver.\n");
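
The netiucv hunks above are a mechanical conversion from the GCC-specific __FUNCTION__ to the standard C99 __func__ predefined identifier; the trace output is unchanged. A minimal standalone illustration of the difference (not taken from the driver):

/* __func__ is a C99 predefined identifier; __FUNCTION__ is a GCC
 * extension kept only for backwards compatibility. */
#include <stdio.h>

static void report(void)
{
	/* expands to the enclosing function's name, here "report" */
	printf("%s: entered\n", __func__);
}

int main(void)
{
	report();
	return 0;
}
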
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 644a06eba828..4d4b54277c43 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -59,15 +59,15 @@ repeat:
59 59
60 printk(KERN_WARNING"%s: Code does not support more " 60 printk(KERN_WARNING"%s: Code does not support more "
61 "than two chained crws; please report to " 61 "than two chained crws; please report to "
62 "linux390@de.ibm.com!\n", __FUNCTION__); 62 "linux390@de.ibm.com!\n", __func__);
63 ccode = stcrw(&tmp_crw); 63 ccode = stcrw(&tmp_crw);
64 printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, " 64 printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
65 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", 65 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
66 __FUNCTION__, tmp_crw.slct, tmp_crw.oflw, 66 __func__, tmp_crw.slct, tmp_crw.oflw,
67 tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc, 67 tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
68 tmp_crw.erc, tmp_crw.rsid); 68 tmp_crw.erc, tmp_crw.rsid);
69 printk(KERN_WARNING"%s: This was crw number %x in the " 69 printk(KERN_WARNING"%s: This was crw number %x in the "
70 "chain\n", __FUNCTION__, chain); 70 "chain\n", __func__, chain);
71 if (ccode != 0) 71 if (ccode != 0)
72 break; 72 break;
73 chain = tmp_crw.chn ? chain + 1 : 0; 73 chain = tmp_crw.chn ? chain + 1 : 0;
@@ -83,7 +83,7 @@ repeat:
83 crw[chain].rsid); 83 crw[chain].rsid);
84 /* Check for overflows. */ 84 /* Check for overflows. */
85 if (crw[chain].oflw) { 85 if (crw[chain].oflw) {
86 pr_debug("%s: crw overflow detected!\n", __FUNCTION__); 86 pr_debug("%s: crw overflow detected!\n", __func__);
87 css_schedule_eval_all(); 87 css_schedule_eval_all();
88 chain = 0; 88 chain = 0;
89 continue; 89 continue;
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h
index d3ca4281a494..ca681f9b67fc 100644
--- a/drivers/s390/s390mach.h
+++ b/drivers/s390/s390mach.h
@@ -105,4 +105,8 @@ static inline int stcrw(struct crw *pcrw )
105#define ED_ETR_SYNC 12 /* External damage ETR sync check */ 105#define ED_ETR_SYNC 12 /* External damage ETR sync check */
106#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ 106#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */
107 107
108struct pt_regs;
109
110void s390_handle_mcck(void);
111void s390_do_machine_check(struct pt_regs *regs);
108#endif /* __s390mach */ 112#endif /* __s390mach */
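
The new prototypes in s390mach.h are preceded by a bare `struct pt_regs;` forward declaration: a function declaration that only passes a pointer needs just an incomplete type, which keeps the header from having to pull in <asm/ptrace.h>. A standalone sketch of the idiom:

/* An incomplete ("forward") declaration is enough when the header
 * only ever handles pointers to the type; callers that dereference
 * the pointer include the real definition themselves. */
struct pt_regs;

void s390_do_machine_check(struct pt_regs *regs);
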
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 9e9f6c1e4e5d..45a7cd98c140 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -539,7 +539,7 @@ struct zfcp_rc_entry {
539 539
540/* logging routine for zfcp */ 540/* logging routine for zfcp */
541#define _ZFCP_LOG(fmt, args...) \ 541#define _ZFCP_LOG(fmt, args...) \
542 printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __FUNCTION__, \ 542 printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __func__, \
543 __LINE__ , ##args) 543 __LINE__ , ##args)
544 544
545#define ZFCP_LOG(level, fmt, args...) \ 545#define ZFCP_LOG(level, fmt, args...) \
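
_ZFCP_LOG() keeps the GNU named-variadic form `args...` while switching to __func__; the `##` pasted before `args` deletes the preceding comma when the macro is invoked with no variadic arguments, so a bare format string still compiles. A standalone example of the same construction (the LOG macro here is hypothetical, not the zfcp one):

/* GNU C named variadic macro: "## args" drops the trailing comma
 * when no extra arguments are supplied. */
#include <stdio.h>

#define LOG(fmt, args...) \
	printf("%s(%d): " fmt, __func__, __LINE__ , ##args)

int main(void)
{
	LOG("plain message\n");		/* no variadic args: ## removes comma */
	LOG("rc=%d\n", 42);		/* with variadic args */
	return 0;
}
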
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 291ff6235fe2..c3e4ab07b9cc 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -11,111 +11,13 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13#include <asm/ebcdic.h> 13#include <asm/ebcdic.h>
14#include <asm/sysinfo.h>
14 15
15/* Sigh, math-emu. Don't ask. */ 16/* Sigh, math-emu. Don't ask. */
16#include <asm/sfp-util.h> 17#include <asm/sfp-util.h>
17#include <math-emu/soft-fp.h> 18#include <math-emu/soft-fp.h>
18#include <math-emu/single.h> 19#include <math-emu/single.h>
19 20
20struct sysinfo_1_1_1 {
21 char reserved_0[32];
22 char manufacturer[16];
23 char type[4];
24 char reserved_1[12];
25 char model_capacity[16];
26 char sequence[16];
27 char plant[4];
28 char model[16];
29};
30
31struct sysinfo_1_2_1 {
32 char reserved_0[80];
33 char sequence[16];
34 char plant[4];
35 char reserved_1[2];
36 unsigned short cpu_address;
37};
38
39struct sysinfo_1_2_2 {
40 char format;
41 char reserved_0[1];
42 unsigned short acc_offset;
43 char reserved_1[24];
44 unsigned int secondary_capability;
45 unsigned int capability;
46 unsigned short cpus_total;
47 unsigned short cpus_configured;
48 unsigned short cpus_standby;
49 unsigned short cpus_reserved;
50 unsigned short adjustment[0];
51};
52
53struct sysinfo_1_2_2_extension {
54 unsigned int alt_capability;
55 unsigned short alt_adjustment[0];
56};
57
58struct sysinfo_2_2_1 {
59 char reserved_0[80];
60 char sequence[16];
61 char plant[4];
62 unsigned short cpu_id;
63 unsigned short cpu_address;
64};
65
66struct sysinfo_2_2_2 {
67 char reserved_0[32];
68 unsigned short lpar_number;
69 char reserved_1;
70 unsigned char characteristics;
71 unsigned short cpus_total;
72 unsigned short cpus_configured;
73 unsigned short cpus_standby;
74 unsigned short cpus_reserved;
75 char name[8];
76 unsigned int caf;
77 char reserved_2[16];
78 unsigned short cpus_dedicated;
79 unsigned short cpus_shared;
80};
81
82#define LPAR_CHAR_DEDICATED (1 << 7)
83#define LPAR_CHAR_SHARED (1 << 6)
84#define LPAR_CHAR_LIMITED (1 << 5)
85
86struct sysinfo_3_2_2 {
87 char reserved_0[31];
88 unsigned char count;
89 struct {
90 char reserved_0[4];
91 unsigned short cpus_total;
92 unsigned short cpus_configured;
93 unsigned short cpus_standby;
94 unsigned short cpus_reserved;
95 char name[8];
96 unsigned int caf;
97 char cpi[16];
98 char reserved_1[24];
99
100 } vm[8];
101};
102
103static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
104{
105 register int r0 asm("0") = (fc << 28) | sel1;
106 register int r1 asm("1") = sel2;
107
108 asm volatile(
109 " stsi 0(%2)\n"
110 "0: jz 2f\n"
111 "1: lhi %0,%3\n"
112 "2:\n"
113 EX_TABLE(0b,1b)
114 : "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
115 : "cc", "memory" );
116 return r0;
117}
118
119static inline int stsi_0(void) 21static inline int stsi_0(void)
120{ 22{
121 int rc = stsi (NULL, 0, 0, 0); 23 int rc = stsi (NULL, 0, 0, 0);
@@ -133,6 +35,8 @@ static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
133 EBCASC(info->sequence, sizeof(info->sequence)); 35 EBCASC(info->sequence, sizeof(info->sequence));
134 EBCASC(info->plant, sizeof(info->plant)); 36 EBCASC(info->plant, sizeof(info->plant));
135 EBCASC(info->model_capacity, sizeof(info->model_capacity)); 37 EBCASC(info->model_capacity, sizeof(info->model_capacity));
38 EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap));
39 EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap));
136 len += sprintf(page + len, "Manufacturer: %-16.16s\n", 40 len += sprintf(page + len, "Manufacturer: %-16.16s\n",
137 info->manufacturer); 41 info->manufacturer);
138 len += sprintf(page + len, "Type: %-4.4s\n", 42 len += sprintf(page + len, "Type: %-4.4s\n",
@@ -155,8 +59,18 @@ static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
155 info->sequence); 59 info->sequence);
156 len += sprintf(page + len, "Plant: %-4.4s\n", 60 len += sprintf(page + len, "Plant: %-4.4s\n",
157 info->plant); 61 info->plant);
158 len += sprintf(page + len, "Model Capacity: %-16.16s\n", 62 len += sprintf(page + len, "Model Capacity: %-16.16s %08u\n",
159 info->model_capacity); 63 info->model_capacity, *(u32 *) info->model_cap_rating);
64 if (info->model_perm_cap[0] != '\0')
65 len += sprintf(page + len,
66 "Model Perm. Capacity: %-16.16s %08u\n",
67 info->model_perm_cap,
68 *(u32 *) info->model_perm_cap_rating);
69 if (info->model_temp_cap[0] != '\0')
70 len += sprintf(page + len,
71 "Model Temp. Capacity: %-16.16s %08u\n",
72 info->model_temp_cap,
73 *(u32 *) info->model_temp_cap_rating);
160 return len; 74 return len;
161} 75}
162 76
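
The sysinfo.c hunks move the struct sysinfo_* layouts and the stsi() inline assembly into <asm/sysinfo.h> (note the new include at the top of the file) and extend the 1.1.1 output with the permanent- and temporary-capacity fields. A hedged sketch of a caller after the move, using only names that appear in the diff, with buffer alignment and error handling simplified:

/* Sketch only; s390 kernel context assumed. stsi() and struct
 * sysinfo_1_1_1 now come from <asm/sysinfo.h>. The real code uses a
 * page-sized, properly aligned buffer for the SYSIB. */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>

static int print_machine_type(char *page)
{
	static struct sysinfo_1_1_1 info;

	if (stsi(&info, 1, 1, 1) == -ENOSYS)	/* level unsupported */
		return 0;
	EBCASC(info.type, sizeof(info.type));	/* EBCDIC -> ASCII */
	return sprintf(page, "Type: %-4.4s\n", info.type);
}
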
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h
index f1e9278a9fe2..d9b2034ed1d2 100644
--- a/include/asm-alpha/semaphore.h
+++ b/include/asm-alpha/semaphore.h
@@ -1,149 +1 @@
1#ifndef _ALPHA_SEMAPHORE_H #include <linux/semaphore.h>
2#define _ALPHA_SEMAPHORE_H
3
4/*
5 * SMP- and interrupt-safe semaphores..
6 *
7 * (C) Copyright 1996 Linus Torvalds
8 * (C) Copyright 1996, 2000 Richard Henderson
9 */
10
11#include <asm/current.h>
12#include <asm/system.h>
13#include <asm/atomic.h>
14#include <linux/compiler.h>
15#include <linux/wait.h>
16#include <linux/rwsem.h>
17
18struct semaphore {
19 atomic_t count;
20 wait_queue_head_t wait;
21};
22
23#define __SEMAPHORE_INITIALIZER(name, n) \
24{ \
25 .count = ATOMIC_INIT(n), \
26 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
27}
28
29#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
30 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
31
32#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
33
34static inline void sema_init(struct semaphore *sem, int val)
35{
36 /*
37 * Logically,
38 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
39 * except that gcc produces better initializing by parts yet.
40 */
41
42 atomic_set(&sem->count, val);
43 init_waitqueue_head(&sem->wait);
44}
45
46static inline void init_MUTEX (struct semaphore *sem)
47{
48 sema_init(sem, 1);
49}
50
51static inline void init_MUTEX_LOCKED (struct semaphore *sem)
52{
53 sema_init(sem, 0);
54}
55
56extern void down(struct semaphore *);
57extern void __down_failed(struct semaphore *);
58extern int down_interruptible(struct semaphore *);
59extern int __down_failed_interruptible(struct semaphore *);
60extern int down_trylock(struct semaphore *);
61extern void up(struct semaphore *);
62extern void __up_wakeup(struct semaphore *);
63
64/*
65 * Hidden out of line code is fun, but extremely messy. Rely on newer
66 * compilers to do a respectable job with this. The contention cases
67 * are handled out of line in arch/alpha/kernel/semaphore.c.
68 */
69
70static inline void __down(struct semaphore *sem)
71{
72 long count;
73 might_sleep();
74 count = atomic_dec_return(&sem->count);
75 if (unlikely(count < 0))
76 __down_failed(sem);
77}
78
79static inline int __down_interruptible(struct semaphore *sem)
80{
81 long count;
82 might_sleep();
83 count = atomic_dec_return(&sem->count);
84 if (unlikely(count < 0))
85 return __down_failed_interruptible(sem);
86 return 0;
87}
88
89/*
90 * down_trylock returns 0 on success, 1 if we failed to get the lock.
91 */
92
93static inline int __down_trylock(struct semaphore *sem)
94{
95 long ret;
96
97 /* "Equivalent" C:
98
99 do {
100 ret = ldl_l;
101 --ret;
102 if (ret < 0)
103 break;
104 ret = stl_c = ret;
105 } while (ret == 0);
106 */
107 __asm__ __volatile__(
108 "1: ldl_l %0,%1\n"
109 " subl %0,1,%0\n"
110 " blt %0,2f\n"
111 " stl_c %0,%1\n"
112 " beq %0,3f\n"
113 " mb\n"
114 "2:\n"
115 ".subsection 2\n"
116 "3: br 1b\n"
117 ".previous"
118 : "=&r" (ret), "=m" (sem->count)
119 : "m" (sem->count));
120
121 return ret < 0;
122}
123
124static inline void __up(struct semaphore *sem)
125{
126 if (unlikely(atomic_inc_return(&sem->count) <= 0))
127 __up_wakeup(sem);
128}
129
130#if !defined(CONFIG_DEBUG_SEMAPHORE)
131extern inline void down(struct semaphore *sem)
132{
133 __down(sem);
134}
135extern inline int down_interruptible(struct semaphore *sem)
136{
137 return __down_interruptible(sem);
138}
139extern inline int down_trylock(struct semaphore *sem)
140{
141 return __down_trylock(sem);
142}
143extern inline void up(struct semaphore *sem)
144{
145 __up(sem);
146}
147#endif
148
149#endif
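
From here on, every per-architecture semaphore.h is reduced to a single `#include <linux/semaphore.h>`, and the arch-specific slow paths and helper headers are deleted; callers keep the same API (sema_init(), down(), down_interruptible(), down_trylock(), up()). A minimal usage sketch against that generic API, kernel context assumed:

/* Minimal usage sketch of the cross-arch <linux/semaphore.h> API
 * that all of these headers now forward to. */
#include <linux/errno.h>
#include <linux/semaphore.h>

static struct semaphore ioctl_sem;

static int dev_init(void)
{
	sema_init(&ioctl_sem, 1);	/* one token: mutual exclusion */
	return 0;
}

static int dev_ioctl(void)
{
	if (down_interruptible(&ioctl_sem))
		return -ERESTARTSYS;	/* signal arrived while sleeping */
	/* ... critical section ... */
	up(&ioctl_sem);
	return 0;
}
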
diff --git a/include/asm-arm/semaphore-helper.h b/include/asm-arm/semaphore-helper.h
deleted file mode 100644
index 1d7f1987edb9..000000000000
--- a/include/asm-arm/semaphore-helper.h
+++ /dev/null
@@ -1,84 +0,0 @@
1#ifndef ASMARM_SEMAPHORE_HELPER_H
2#define ASMARM_SEMAPHORE_HELPER_H
3
4/*
5 * These two _must_ execute atomically wrt each other.
6 */
7static inline void wake_one_more(struct semaphore * sem)
8{
9 unsigned long flags;
10
11 spin_lock_irqsave(&semaphore_wake_lock, flags);
12 if (atomic_read(&sem->count) <= 0)
13 sem->waking++;
14 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
15}
16
17static inline int waking_non_zero(struct semaphore *sem)
18{
19 unsigned long flags;
20 int ret = 0;
21
22 spin_lock_irqsave(&semaphore_wake_lock, flags);
23 if (sem->waking > 0) {
24 sem->waking--;
25 ret = 1;
26 }
27 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
28 return ret;
29}
30
31/*
32 * waking non zero interruptible
33 * 1 got the lock
34 * 0 go to sleep
35 * -EINTR interrupted
36 *
37 * We must undo the sem->count down_interruptible() increment while we are
38 * protected by the spinlock in order to make this atomic_inc() with the
39 * atomic_read() in wake_one_more(), otherwise we can race. -arca
40 */
41static inline int waking_non_zero_interruptible(struct semaphore *sem,
42 struct task_struct *tsk)
43{
44 unsigned long flags;
45 int ret = 0;
46
47 spin_lock_irqsave(&semaphore_wake_lock, flags);
48 if (sem->waking > 0) {
49 sem->waking--;
50 ret = 1;
51 } else if (signal_pending(tsk)) {
52 atomic_inc(&sem->count);
53 ret = -EINTR;
54 }
55 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
56 return ret;
57}
58
59/*
60 * waking_non_zero_try_lock:
61 * 1 failed to lock
62 * 0 got the lock
63 *
64 * We must undo the sem->count down_interruptible() increment while we are
65 * protected by the spinlock in order to make this atomic_inc() with the
66 * atomic_read() in wake_one_more(), otherwise we can race. -arca
67 */
68static inline int waking_non_zero_trylock(struct semaphore *sem)
69{
70 unsigned long flags;
71 int ret = 1;
72
73 spin_lock_irqsave(&semaphore_wake_lock, flags);
74 if (sem->waking <= 0)
75 atomic_inc(&sem->count);
76 else {
77 sem->waking--;
78 ret = 0;
79 }
80 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
81 return ret;
82}
83
84#endif
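
The deleted semaphore-helper.h files all encode the same handshake: up() banks a wakeup in a separate `waking` counter, and a sleeper may claim the semaphore only by consuming one of those banked wakeups under a lock — which is why the comments insist wake_one_more() and waking_non_zero() execute atomically with respect to each other. An illustrative-only plain-C sketch of that retired protocol, with a pthread mutex standing in for the kernel spinlock:

/* Illustration only, not kernel code: the core of the old "waking"
 * handshake that these helper headers implemented per architecture. */
#include <pthread.h>

struct old_sem {
	int count;		/* classic semaphore count */
	int waking;		/* wakeups banked by up() */
	pthread_mutex_t lock;
};

static void wake_one_more(struct old_sem *sem)
{
	pthread_mutex_lock(&sem->lock);
	sem->waking++;			/* bank one wakeup */
	pthread_mutex_unlock(&sem->lock);
}

static int waking_non_zero(struct old_sem *sem)
{
	int ret = 0;

	pthread_mutex_lock(&sem->lock);
	if (sem->waking > 0) {		/* consume a banked wakeup */
		sem->waking--;
		ret = 1;		/* caller owns the semaphore */
	}
	pthread_mutex_unlock(&sem->lock);
	return ret;			/* 0: go back to sleep */
}
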
diff --git a/include/asm-arm/semaphore.h b/include/asm-arm/semaphore.h
index 1c8b441f89e3..d9b2034ed1d2 100644
--- a/include/asm-arm/semaphore.h
+++ b/include/asm-arm/semaphore.h
@@ -1,98 +1 @@
1/* #include <linux/semaphore.h>
2 * linux/include/asm-arm/semaphore.h
3 */
4#ifndef __ASM_ARM_SEMAPHORE_H
5#define __ASM_ARM_SEMAPHORE_H
6
7#include <linux/linkage.h>
8#include <linux/spinlock.h>
9#include <linux/wait.h>
10#include <linux/rwsem.h>
11
12#include <asm/atomic.h>
13#include <asm/locks.h>
14
15struct semaphore {
16 atomic_t count;
17 int sleepers;
18 wait_queue_head_t wait;
19};
20
21#define __SEMAPHORE_INIT(name, cnt) \
22{ \
23 .count = ATOMIC_INIT(cnt), \
24 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
25}
26
27#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
28 struct semaphore name = __SEMAPHORE_INIT(name,count)
29
30#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
31
32static inline void sema_init(struct semaphore *sem, int val)
33{
34 atomic_set(&sem->count, val);
35 sem->sleepers = 0;
36 init_waitqueue_head(&sem->wait);
37}
38
39static inline void init_MUTEX(struct semaphore *sem)
40{
41 sema_init(sem, 1);
42}
43
44static inline void init_MUTEX_LOCKED(struct semaphore *sem)
45{
46 sema_init(sem, 0);
47}
48
49/*
50 * special register calling convention
51 */
52asmlinkage void __down_failed(void);
53asmlinkage int __down_interruptible_failed(void);
54asmlinkage int __down_trylock_failed(void);
55asmlinkage void __up_wakeup(void);
56
57extern void __down(struct semaphore * sem);
58extern int __down_interruptible(struct semaphore * sem);
59extern int __down_trylock(struct semaphore * sem);
60extern void __up(struct semaphore * sem);
61
62/*
63 * This is ugly, but we want the default case to fall through.
64 * "__down" is the actual routine that waits...
65 */
66static inline void down(struct semaphore * sem)
67{
68 might_sleep();
69 __down_op(sem, __down_failed);
70}
71
72/*
73 * This is ugly, but we want the default case to fall through.
74 * "__down_interruptible" is the actual routine that waits...
75 */
76static inline int down_interruptible (struct semaphore * sem)
77{
78 might_sleep();
79 return __down_op_ret(sem, __down_interruptible_failed);
80}
81
82static inline int down_trylock(struct semaphore *sem)
83{
84 return __down_op_ret(sem, __down_trylock_failed);
85}
86
87/*
88 * Note! This is subtle. We jump to wake people up only if
89 * the semaphore was negative (== somebody was waiting on it).
90 * The default case (no contention) will result in NO
91 * jumps for both down() and up().
92 */
93static inline void up(struct semaphore * sem)
94{
95 __up_op(sem, __up_wakeup);
96}
97
98#endif
diff --git a/include/asm-avr32/semaphore.h b/include/asm-avr32/semaphore.h
index feaf1d453386..d9b2034ed1d2 100644
--- a/include/asm-avr32/semaphore.h
+++ b/include/asm-avr32/semaphore.h
@@ -1,108 +1 @@
1/* #include <linux/semaphore.h>
2 * SMP- and interrupt-safe semaphores.
3 *
4 * Copyright (C) 2006 Atmel Corporation
5 *
6 * Based on include/asm-i386/semaphore.h
7 * Copyright (C) 1996 Linus Torvalds
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13#ifndef __ASM_AVR32_SEMAPHORE_H
14#define __ASM_AVR32_SEMAPHORE_H
15
16#include <linux/linkage.h>
17
18#include <asm/system.h>
19#include <asm/atomic.h>
20#include <linux/wait.h>
21#include <linux/rwsem.h>
22
23struct semaphore {
24 atomic_t count;
25 int sleepers;
26 wait_queue_head_t wait;
27};
28
29#define __SEMAPHORE_INITIALIZER(name, n) \
30{ \
31 .count = ATOMIC_INIT(n), \
32 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
33}
34
35#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
36 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
37
38#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
39
40static inline void sema_init (struct semaphore *sem, int val)
41{
42 atomic_set(&sem->count, val);
43 sem->sleepers = 0;
44 init_waitqueue_head(&sem->wait);
45}
46
47static inline void init_MUTEX (struct semaphore *sem)
48{
49 sema_init(sem, 1);
50}
51
52static inline void init_MUTEX_LOCKED (struct semaphore *sem)
53{
54 sema_init(sem, 0);
55}
56
57void __down(struct semaphore * sem);
58int __down_interruptible(struct semaphore * sem);
59void __up(struct semaphore * sem);
60
61/*
62 * This is ugly, but we want the default case to fall through.
63 * "__down_failed" is a special asm handler that calls the C
64 * routine that actually waits. See arch/i386/kernel/semaphore.c
65 */
66static inline void down(struct semaphore * sem)
67{
68 might_sleep();
69 if (unlikely(atomic_dec_return (&sem->count) < 0))
70 __down (sem);
71}
72
73/*
74 * Interruptible try to acquire a semaphore. If we obtained
75 * it, return zero. If we were interrupted, returns -EINTR
76 */
77static inline int down_interruptible(struct semaphore * sem)
78{
79 int ret = 0;
80
81 might_sleep();
82 if (unlikely(atomic_dec_return (&sem->count) < 0))
83 ret = __down_interruptible (sem);
84 return ret;
85}
86
87/*
88 * Non-blockingly attempt to down() a semaphore.
89 * Returns zero if we acquired it
90 */
91static inline int down_trylock(struct semaphore * sem)
92{
93 return atomic_dec_if_positive(&sem->count) < 0;
94}
95
96/*
97 * Note! This is subtle. We jump to wake people up only if
98 * the semaphore was negative (== somebody was waiting on it).
99 * The default case (no contention) will result in NO
100 * jumps for both down() and up().
101 */
102static inline void up(struct semaphore * sem)
103{
104 if (unlikely(atomic_inc_return (&sem->count) <= 0))
105 __up (sem);
106}
107
108#endif /*__ASM_AVR32_SEMAPHORE_H */
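
avr32, like m32r and blackfin below, uses the classic counting fast path: down() is one atomic decrement and only a negative result enters the out-of-line sleep path, while up() wakes only when the count was negative. avr32's down_trylock() stands out by using atomic_dec_if_positive(), which refuses to take the count below zero. A C11 sketch of that shape (the __down/__up slow paths are hypothetical stand-ins for the arch code):

#include <stdatomic.h>
#include <stdbool.h>

extern void __down(atomic_int *count);	/* hypothetical slow paths */
extern void __up(atomic_int *count);

static void down(atomic_int *count)
{
	/* fetch_sub returns the old value; old <= 0 means the new
	 * count went negative and we must sleep */
	if (atomic_fetch_sub(count, 1) <= 0)
		__down(count);
}

static bool down_trylock_failed(atomic_int *count)
{
	int old = atomic_load(count);

	/* like atomic_dec_if_positive(): decrement only while the
	 * result stays non-negative; true means we failed to lock */
	while (old > 0)
		if (atomic_compare_exchange_weak(count, &old, old - 1))
			return false;
	return true;
}

static void up(atomic_int *count)
{
	/* old < 0 means at least one sleeper must be woken */
	if (atomic_fetch_add(count, 1) < 0)
		__up(count);
}
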
diff --git a/include/asm-blackfin/semaphore-helper.h b/include/asm-blackfin/semaphore-helper.h
deleted file mode 100644
index 9082b0dc3eb5..000000000000
--- a/include/asm-blackfin/semaphore-helper.h
+++ /dev/null
@@ -1,82 +0,0 @@
1/* Based on M68K version, Lineo Inc. May 2001 */
2
3#ifndef _BFIN_SEMAPHORE_HELPER_H
4#define _BFIN_SEMAPHORE_HELPER_H
5
6/*
7 * SMP- and interrupt-safe semaphores helper functions.
8 *
9 * (C) Copyright 1996 Linus Torvalds
10 *
11 */
12
13#include <asm/errno.h>
14
15/*
16 * These two _must_ execute atomically wrt each other.
17 */
18static inline void wake_one_more(struct semaphore *sem)
19{
20 atomic_inc(&sem->waking);
21}
22
23static inline int waking_non_zero(struct semaphore *sem)
24{
25 int ret;
26 unsigned long flags = 0;
27
28 spin_lock_irqsave(&semaphore_wake_lock, flags);
29 ret = 0;
30 if (atomic_read(&sem->waking) > 0) {
31 atomic_dec(&sem->waking);
32 ret = 1;
33 }
34 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
35 return ret;
36}
37
38/*
39 * waking_non_zero_interruptible:
40 * 1 got the lock
41 * 0 go to sleep
42 * -EINTR interrupted
43 */
44static inline int waking_non_zero_interruptible(struct semaphore *sem,
45 struct task_struct *tsk)
46{
47 int ret = 0;
48 unsigned long flags = 0;
49
50 spin_lock_irqsave(&semaphore_wake_lock, flags);
51 if (atomic_read(&sem->waking) > 0) {
52 atomic_dec(&sem->waking);
53 ret = 1;
54 } else if (signal_pending(tsk)) {
55 atomic_inc(&sem->count);
56 ret = -EINTR;
57 }
58 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
59 return ret;
60}
61
62/*
63 * waking_non_zero_trylock:
64 * 1 failed to lock
65 * 0 got the lock
66 */
67static inline int waking_non_zero_trylock(struct semaphore *sem)
68{
69 int ret = 1;
70 unsigned long flags = 0;
71
72 spin_lock_irqsave(&semaphore_wake_lock, flags);
73 if (atomic_read(&sem->waking) > 0) {
74 atomic_dec(&sem->waking);
75 ret = 0;
76 } else
77 atomic_inc(&sem->count);
78 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
79 return ret;
80}
81
82#endif /* _BFIN_SEMAPHORE_HELPER_H */
diff --git a/include/asm-blackfin/semaphore.h b/include/asm-blackfin/semaphore.h
index 533f90fb2e4e..d9b2034ed1d2 100644
--- a/include/asm-blackfin/semaphore.h
+++ b/include/asm-blackfin/semaphore.h
@@ -1,105 +1 @@
1#ifndef _BFIN_SEMAPHORE_H #include <linux/semaphore.h>
2#define _BFIN_SEMAPHORE_H
3
4#ifndef __ASSEMBLY__
5
6#include <linux/linkage.h>
7#include <linux/wait.h>
8#include <linux/spinlock.h>
9#include <linux/rwsem.h>
10#include <asm/atomic.h>
11
12/*
13 * Interrupt-safe semaphores..
14 *
15 * (C) Copyright 1996 Linus Torvalds
16 *
17 * BFIN version by akbar hussain Lineo Inc April 2001
18 *
19 */
20
21struct semaphore {
22 atomic_t count;
23 int sleepers;
24 wait_queue_head_t wait;
25};
26
27#define __SEMAPHORE_INITIALIZER(name, n) \
28{ \
29 .count = ATOMIC_INIT(n), \
30 .sleepers = 0, \
31 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
32}
33
34#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
35 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
36
37#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
38
39static inline void sema_init(struct semaphore *sem, int val)
40{
41 *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
42}
43
44static inline void init_MUTEX(struct semaphore *sem)
45{
46 sema_init(sem, 1);
47}
48
49static inline void init_MUTEX_LOCKED(struct semaphore *sem)
50{
51 sema_init(sem, 0);
52}
53
54asmlinkage void __down(struct semaphore *sem);
55asmlinkage int __down_interruptible(struct semaphore *sem);
56asmlinkage int __down_trylock(struct semaphore *sem);
57asmlinkage void __up(struct semaphore *sem);
58
59extern spinlock_t semaphore_wake_lock;
60
61/*
62 * This is ugly, but we want the default case to fall through.
63 * "down_failed" is a special asm handler that calls the C
64 * routine that actually waits.
65 */
66static inline void down(struct semaphore *sem)
67{
68 might_sleep();
69 if (atomic_dec_return(&sem->count) < 0)
70 __down(sem);
71}
72
73static inline int down_interruptible(struct semaphore *sem)
74{
75 int ret = 0;
76
77 might_sleep();
78 if (atomic_dec_return(&sem->count) < 0)
79 ret = __down_interruptible(sem);
80 return (ret);
81}
82
83static inline int down_trylock(struct semaphore *sem)
84{
85 int ret = 0;
86
87 if (atomic_dec_return(&sem->count) < 0)
88 ret = __down_trylock(sem);
89 return ret;
90}
91
92/*
93 * Note! This is subtle. We jump to wake people up only if
94 * the semaphore was negative (== somebody was waiting on it).
95 * The default case (no contention) will result in NO
96 * jumps for both down() and up().
97 */
98static inline void up(struct semaphore *sem)
99{
100 if (atomic_inc_return(&sem->count) <= 0)
101 __up(sem);
102}
103
104#endif /* __ASSEMBLY__ */
105#endif /* _BFIN_SEMAPHORE_H */
diff --git a/include/asm-cris/semaphore-helper.h b/include/asm-cris/semaphore-helper.h
deleted file mode 100644
index 27bfeca1b981..000000000000
--- a/include/asm-cris/semaphore-helper.h
+++ /dev/null
@@ -1,78 +0,0 @@
1/* $Id: semaphore-helper.h,v 1.3 2001/03/26 15:00:33 orjanf Exp $
2 *
3 * SMP- and interrupt-safe semaphores helper functions. Generic versions, no
4 * optimizations whatsoever...
5 *
6 */
7
8#ifndef _ASM_SEMAPHORE_HELPER_H
9#define _ASM_SEMAPHORE_HELPER_H
10
11#include <asm/atomic.h>
12#include <linux/errno.h>
13
14#define read(a) ((a)->counter)
15#define inc(a) (((a)->counter)++)
16#define dec(a) (((a)->counter)--)
17
18#define count_inc(a) ((*(a))++)
19
20/*
21 * These two _must_ execute atomically wrt each other.
22 */
23static inline void wake_one_more(struct semaphore * sem)
24{
25 atomic_inc(&sem->waking);
26}
27
28static inline int waking_non_zero(struct semaphore *sem)
29{
30 unsigned long flags;
31 int ret = 0;
32
33 local_irq_save(flags);
34 if (read(&sem->waking) > 0) {
35 dec(&sem->waking);
36 ret = 1;
37 }
38 local_irq_restore(flags);
39 return ret;
40}
41
42static inline int waking_non_zero_interruptible(struct semaphore *sem,
43 struct task_struct *tsk)
44{
45 int ret = 0;
46 unsigned long flags;
47
48 local_irq_save(flags);
49 if (read(&sem->waking) > 0) {
50 dec(&sem->waking);
51 ret = 1;
52 } else if (signal_pending(tsk)) {
53 inc(&sem->count);
54 ret = -EINTR;
55 }
56 local_irq_restore(flags);
57 return ret;
58}
59
60static inline int waking_non_zero_trylock(struct semaphore *sem)
61{
62 int ret = 1;
63 unsigned long flags;
64
65 local_irq_save(flags);
66 if (read(&sem->waking) <= 0)
67 inc(&sem->count);
68 else {
69 dec(&sem->waking);
70 ret = 0;
71 }
72 local_irq_restore(flags);
73 return ret;
74}
75
76#endif /* _ASM_SEMAPHORE_HELPER_H */
77
78
diff --git a/include/asm-cris/semaphore.h b/include/asm-cris/semaphore.h
index 31a4ac448195..d9b2034ed1d2 100644
--- a/include/asm-cris/semaphore.h
+++ b/include/asm-cris/semaphore.h
@@ -1,133 +1 @@
1/* $Id: semaphore.h,v 1.3 2001/05/08 13:54:09 bjornw Exp $ */ #include <linux/semaphore.h>
2
3/* On the i386 these are coded in asm, perhaps we should as well. Later.. */
4
5#ifndef _CRIS_SEMAPHORE_H
6#define _CRIS_SEMAPHORE_H
7
8#define RW_LOCK_BIAS 0x01000000
9
10#include <linux/wait.h>
11#include <linux/spinlock.h>
12#include <linux/rwsem.h>
13
14#include <asm/system.h>
15#include <asm/atomic.h>
16
17/*
18 * CRIS semaphores, implemented in C-only so far.
19 */
20
21struct semaphore {
22 atomic_t count;
23 atomic_t waking;
24 wait_queue_head_t wait;
25};
26
27#define __SEMAPHORE_INITIALIZER(name, n) \
28{ \
29 .count = ATOMIC_INIT(n), \
30 .waking = ATOMIC_INIT(0), \
31 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
32}
33
34#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
35 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
36
37#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
38
39static inline void sema_init(struct semaphore *sem, int val)
40{
41 *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
42}
43
44static inline void init_MUTEX (struct semaphore *sem)
45{
46 sema_init(sem, 1);
47}
48
49static inline void init_MUTEX_LOCKED (struct semaphore *sem)
50{
51 sema_init(sem, 0);
52}
53
54extern void __down(struct semaphore * sem);
55extern int __down_interruptible(struct semaphore * sem);
56extern int __down_trylock(struct semaphore * sem);
57extern void __up(struct semaphore * sem);
58
59/* notice - we probably can do cli/sti here instead of saving */
60
61static inline void down(struct semaphore * sem)
62{
63 unsigned long flags;
64 int failed;
65
66 might_sleep();
67
68 /* atomically decrement the semaphores count, and if its negative, we wait */
69 cris_atomic_save(sem, flags);
70 failed = --(sem->count.counter) < 0;
71 cris_atomic_restore(sem, flags);
72 if(failed) {
73 __down(sem);
74 }
75}
76
77/*
78 * This version waits in interruptible state so that the waiting
79 * process can be killed. The down_interruptible routine
80 * returns negative for signalled and zero for semaphore acquired.
81 */
82
83static inline int down_interruptible(struct semaphore * sem)
84{
85 unsigned long flags;
86 int failed;
87
88 might_sleep();
89
90 /* atomically decrement the semaphores count, and if its negative, we wait */
91 cris_atomic_save(sem, flags);
92 failed = --(sem->count.counter) < 0;
93 cris_atomic_restore(sem, flags);
94 if(failed)
95 failed = __down_interruptible(sem);
96 return(failed);
97}
98
99static inline int down_trylock(struct semaphore * sem)
100{
101 unsigned long flags;
102 int failed;
103
104 cris_atomic_save(sem, flags);
105 failed = --(sem->count.counter) < 0;
106 cris_atomic_restore(sem, flags);
107 if(failed)
108 failed = __down_trylock(sem);
109 return(failed);
110
111}
112
113/*
114 * Note! This is subtle. We jump to wake people up only if
115 * the semaphore was negative (== somebody was waiting on it).
116 * The default case (no contention) will result in NO
117 * jumps for both down() and up().
118 */
119static inline void up(struct semaphore * sem)
120{
121 unsigned long flags;
122 int wakeup;
123
124 /* atomically increment the semaphores count, and if it was negative, we wake people */
125 cris_atomic_save(sem, flags);
126 wakeup = ++(sem->count.counter) <= 0;
127 cris_atomic_restore(sem, flags);
128 if(wakeup) {
129 __up(sem);
130 }
131}
132
133#endif
diff --git a/include/asm-frv/semaphore.h b/include/asm-frv/semaphore.h
index d7aaa1911a1a..d9b2034ed1d2 100644
--- a/include/asm-frv/semaphore.h
+++ b/include/asm-frv/semaphore.h
@@ -1,155 +1 @@
1/* semaphore.h: semaphores for the FR-V #include <linux/semaphore.h>
2 *
3 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#ifndef _ASM_SEMAPHORE_H
12#define _ASM_SEMAPHORE_H
13
14#define RW_LOCK_BIAS 0x01000000
15
16#ifndef __ASSEMBLY__
17
18#include <linux/linkage.h>
19#include <linux/wait.h>
20#include <linux/spinlock.h>
21#include <linux/rwsem.h>
22
23/*
24 * the semaphore definition
25 * - if counter is >0 then there are tokens available on the semaphore for down to collect
26 * - if counter is <=0 then there are no spare tokens, and anyone that wants one must wait
27 * - if wait_list is not empty, then there are processes waiting for the semaphore
28 */
29struct semaphore {
30 unsigned counter;
31 spinlock_t wait_lock;
32 struct list_head wait_list;
33#ifdef CONFIG_DEBUG_SEMAPHORE
34 unsigned __magic;
35#endif
36};
37
38#ifdef CONFIG_DEBUG_SEMAPHORE
39# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
40#else
41# define __SEM_DEBUG_INIT(name)
42#endif
43
44
45#define __SEMAPHORE_INITIALIZER(name,count) \
46{ count, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __SEM_DEBUG_INIT(name) }
47
48#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
49 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
50
51#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
52
53static inline void sema_init (struct semaphore *sem, int val)
54{
55 *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
56}
57
58static inline void init_MUTEX (struct semaphore *sem)
59{
60 sema_init(sem, 1);
61}
62
63static inline void init_MUTEX_LOCKED (struct semaphore *sem)
64{
65 sema_init(sem, 0);
66}
67
68extern void __down(struct semaphore *sem, unsigned long flags);
69extern int __down_interruptible(struct semaphore *sem, unsigned long flags);
70extern void __up(struct semaphore *sem);
71
72static inline void down(struct semaphore *sem)
73{
74 unsigned long flags;
75
76#ifdef CONFIG_DEBUG_SEMAPHORE
77 CHECK_MAGIC(sem->__magic);
78#endif
79
80 spin_lock_irqsave(&sem->wait_lock, flags);
81 if (likely(sem->counter > 0)) {
82 sem->counter--;
83 spin_unlock_irqrestore(&sem->wait_lock, flags);
84 }
85 else {
86 __down(sem, flags);
87 }
88}
89
90static inline int down_interruptible(struct semaphore *sem)
91{
92 unsigned long flags;
93 int ret = 0;
94
95#ifdef CONFIG_DEBUG_SEMAPHORE
96 CHECK_MAGIC(sem->__magic);
97#endif
98
99 spin_lock_irqsave(&sem->wait_lock, flags);
100 if (likely(sem->counter > 0)) {
101 sem->counter--;
102 spin_unlock_irqrestore(&sem->wait_lock, flags);
103 }
104 else {
105 ret = __down_interruptible(sem, flags);
106 }
107 return ret;
108}
109
110/*
111 * non-blockingly attempt to down() a semaphore.
112 * - returns zero if we acquired it
113 */
114static inline int down_trylock(struct semaphore *sem)
115{
116 unsigned long flags;
117 int success = 0;
118
119#ifdef CONFIG_DEBUG_SEMAPHORE
120 CHECK_MAGIC(sem->__magic);
121#endif
122
123 spin_lock_irqsave(&sem->wait_lock, flags);
124 if (sem->counter > 0) {
125 sem->counter--;
126 success = 1;
127 }
128 spin_unlock_irqrestore(&sem->wait_lock, flags);
129 return !success;
130}
131
132static inline void up(struct semaphore *sem)
133{
134 unsigned long flags;
135
136#ifdef CONFIG_DEBUG_SEMAPHORE
137 CHECK_MAGIC(sem->__magic);
138#endif
139
140 spin_lock_irqsave(&sem->wait_lock, flags);
141 if (!list_empty(&sem->wait_list))
142 __up(sem);
143 else
144 sem->counter++;
145 spin_unlock_irqrestore(&sem->wait_lock, flags);
146}
147
148static inline int sem_getcount(struct semaphore *sem)
149{
150 return sem->counter;
151}
152
153#endif /* __ASSEMBLY__ */
154
155#endif
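
Unlike the atomic-counter ports, frv already kept a plain counter and a wait list under a spinlock — structurally the same design as the generic <linux/semaphore.h> implementation these headers now defer to, which is what makes this conversion a straight deletion. A sketch of that counter-under-spinlock trylock shape (kernel context assumed; `sem_like` is a hypothetical stand-in type, laid out as in the deleted header):

#include <linux/spinlock.h>
#include <linux/list.h>

struct sem_like {
	unsigned counter;		/* >0: tokens available */
	spinlock_t wait_lock;
	struct list_head wait_list;	/* sleepers, FIFO */
};

static int sem_like_trylock(struct sem_like *sem)
{
	unsigned long flags;
	int success = 0;

	spin_lock_irqsave(&sem->wait_lock, flags);
	if (sem->counter > 0) {		/* a token is available */
		sem->counter--;
		success = 1;
	}
	spin_unlock_irqrestore(&sem->wait_lock, flags);
	return !success;		/* 0 on success, 1 on failure */
}
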
diff --git a/include/asm-h8300/semaphore-helper.h b/include/asm-h8300/semaphore-helper.h
deleted file mode 100644
index 4fea36be5fd8..000000000000
--- a/include/asm-h8300/semaphore-helper.h
+++ /dev/null
@@ -1,85 +0,0 @@
1#ifndef _H8300_SEMAPHORE_HELPER_H
2#define _H8300_SEMAPHORE_HELPER_H
3
4/*
5 * SMP- and interrupt-safe semaphores helper functions.
6 *
7 * (C) Copyright 1996 Linus Torvalds
8 *
9 * based on
10 * m68k version by Andreas Schwab
11 */
12
13#include <linux/errno.h>
14
15/*
16 * These two _must_ execute atomically wrt each other.
17 */
18static inline void wake_one_more(struct semaphore * sem)
19{
20 atomic_inc((atomic_t *)&sem->sleepers);
21}
22
23static inline int waking_non_zero(struct semaphore *sem)
24{
25 int ret;
26 unsigned long flags;
27
28 spin_lock_irqsave(&semaphore_wake_lock, flags);
29 ret = 0;
30 if (sem->sleepers > 0) {
31 sem->sleepers--;
32 ret = 1;
33 }
34 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
35 return ret;
36}
37
38/*
39 * waking_non_zero_interruptible:
40 * 1 got the lock
41 * 0 go to sleep
42 * -EINTR interrupted
43 */
44static inline int waking_non_zero_interruptible(struct semaphore *sem,
45 struct task_struct *tsk)
46{
47 int ret;
48 unsigned long flags;
49
50 spin_lock_irqsave(&semaphore_wake_lock, flags);
51 ret = 0;
52 if (sem->sleepers > 0) {
53 sem->sleepers--;
54 ret = 1;
55 } else if (signal_pending(tsk)) {
56 atomic_inc(&sem->count);
57 ret = -EINTR;
58 }
59 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
60 return ret;
61}
62
63/*
64 * waking_non_zero_trylock:
65 * 1 failed to lock
66 * 0 got the lock
67 */
68static inline int waking_non_zero_trylock(struct semaphore *sem)
69{
70 int ret;
71 unsigned long flags;
72
73 spin_lock_irqsave(&semaphore_wake_lock, flags);
74 ret = 1;
75 if (sem->sleepers <= 0)
76 atomic_inc(&sem->count);
77 else {
78 sem->sleepers--;
79 ret = 0;
80 }
81 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
82 return ret;
83}
84
85#endif
diff --git a/include/asm-h8300/semaphore.h b/include/asm-h8300/semaphore.h
index f3ffff83ff09..d9b2034ed1d2 100644
--- a/include/asm-h8300/semaphore.h
+++ b/include/asm-h8300/semaphore.h
@@ -1,190 +1 @@
1#ifndef _H8300_SEMAPHORE_H #include <linux/semaphore.h>
2#define _H8300_SEMAPHORE_H
3
4#define RW_LOCK_BIAS 0x01000000
5
6#ifndef __ASSEMBLY__
7
8#include <linux/linkage.h>
9#include <linux/wait.h>
10#include <linux/spinlock.h>
11#include <linux/rwsem.h>
12
13#include <asm/system.h>
14#include <asm/atomic.h>
15
16/*
17 * Interrupt-safe semaphores..
18 *
19 * (C) Copyright 1996 Linus Torvalds
20 *
21 * H8/300 version by Yoshinori Sato
22 */
23
24
25struct semaphore {
26 atomic_t count;
27 int sleepers;
28 wait_queue_head_t wait;
29};
30
31#define __SEMAPHORE_INITIALIZER(name, n) \
32{ \
33 .count = ATOMIC_INIT(n), \
34 .sleepers = 0, \
35 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
36}
37
38#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
39 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
40
41#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
42
43static inline void sema_init (struct semaphore *sem, int val)
44{
45 *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
46}
47
48static inline void init_MUTEX (struct semaphore *sem)
49{
50 sema_init(sem, 1);
51}
52
53static inline void init_MUTEX_LOCKED (struct semaphore *sem)
54{
55 sema_init(sem, 0);
56}
57
58asmlinkage void __down_failed(void /* special register calling convention */);
59asmlinkage int __down_failed_interruptible(void /* params in registers */);
60asmlinkage int __down_failed_trylock(void /* params in registers */);
61asmlinkage void __up_wakeup(void /* special register calling convention */);
62
63asmlinkage void __down(struct semaphore * sem);
64asmlinkage int __down_interruptible(struct semaphore * sem);
65asmlinkage int __down_trylock(struct semaphore * sem);
66asmlinkage void __up(struct semaphore * sem);
67
68extern spinlock_t semaphore_wake_lock;
69
70/*
71 * This is ugly, but we want the default case to fall through.
72 * "down_failed" is a special asm handler that calls the C
73 * routine that actually waits. See arch/m68k/lib/semaphore.S
74 */
75static inline void down(struct semaphore * sem)
76{
77 register atomic_t *count asm("er0");
78
79 might_sleep();
80
81 count = &(sem->count);
82 __asm__ __volatile__(
83 "stc ccr,r3l\n\t"
84 "orc #0x80,ccr\n\t"
85 "mov.l %2, er1\n\t"
86 "dec.l #1,er1\n\t"
87 "mov.l er1,%0\n\t"
88 "bpl 1f\n\t"
89 "ldc r3l,ccr\n\t"
90 "mov.l %1,er0\n\t"
91 "jsr @___down\n\t"
92 "bra 2f\n"
93 "1:\n\t"
94 "ldc r3l,ccr\n"
95 "2:"
96 : "=m"(*count)
97 : "g"(sem),"m"(*count)
98 : "cc", "er1", "er2", "er3");
99}
100
101static inline int down_interruptible(struct semaphore * sem)
102{
103 register atomic_t *count asm("er0");
104
105 might_sleep();
106
107 count = &(sem->count);
108 __asm__ __volatile__(
109 "stc ccr,r1l\n\t"
110 "orc #0x80,ccr\n\t"
111 "mov.l %3, er2\n\t"
112 "dec.l #1,er2\n\t"
113 "mov.l er2,%1\n\t"
114 "bpl 1f\n\t"
115 "ldc r1l,ccr\n\t"
116 "mov.l %2,er0\n\t"
117 "jsr @___down_interruptible\n\t"
118 "bra 2f\n"
119 "1:\n\t"
120 "ldc r1l,ccr\n\t"
121 "sub.l %0,%0\n\t"
122 "2:\n\t"
123 : "=r" (count),"=m" (*count)
124 : "g"(sem),"m"(*count)
125 : "cc", "er1", "er2", "er3");
126 return (int)count;
127}
128
129static inline int down_trylock(struct semaphore * sem)
130{
131 register atomic_t *count asm("er0");
132
133 count = &(sem->count);
134 __asm__ __volatile__(
135 "stc ccr,r3l\n\t"
136 "orc #0x80,ccr\n\t"
137 "mov.l %3,er2\n\t"
138 "dec.l #1,er2\n\t"
139 "mov.l er2,%0\n\t"
140 "bpl 1f\n\t"
141 "ldc r3l,ccr\n\t"
142 "jmp @3f\n\t"
143 LOCK_SECTION_START(".align 2\n\t")
144 "3:\n\t"
145 "mov.l %2,er0\n\t"
146 "jsr @___down_trylock\n\t"
147 "jmp @2f\n\t"
148 LOCK_SECTION_END
149 "1:\n\t"
150 "ldc r3l,ccr\n\t"
151 "sub.l %1,%1\n"
152 "2:"
153 : "=m" (*count),"=r"(count)
154 : "g"(sem),"m"(*count)
155 : "cc", "er1","er2", "er3");
156 return (int)count;
157}
158
159/*
160 * Note! This is subtle. We jump to wake people up only if
161 * the semaphore was negative (== somebody was waiting on it).
162 * The default case (no contention) will result in NO
163 * jumps for both down() and up().
164 */
165static inline void up(struct semaphore * sem)
166{
167 register atomic_t *count asm("er0");
168
169 count = &(sem->count);
170 __asm__ __volatile__(
171 "stc ccr,r3l\n\t"
172 "orc #0x80,ccr\n\t"
173 "mov.l %2,er1\n\t"
174 "inc.l #1,er1\n\t"
175 "mov.l er1,%0\n\t"
176 "ldc r3l,ccr\n\t"
177 "sub.l er2,er2\n\t"
178 "cmp.l er2,er1\n\t"
179 "bgt 1f\n\t"
180 "mov.l %1,er0\n\t"
181 "jsr @___up\n"
182 "1:"
183 : "=m"(*count)
184 : "g"(sem),"m"(*count)
185 : "cc", "er1", "er2", "er3");
186}
187
188#endif /* __ASSEMBLY__ */
189
190#endif
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
index d8393d11288d..d9b2034ed1d2 100644
--- a/include/asm-ia64/semaphore.h
+++ b/include/asm-ia64/semaphore.h
@@ -1,99 +1 @@
1#ifndef _ASM_IA64_SEMAPHORE_H #include <linux/semaphore.h>
2#define _ASM_IA64_SEMAPHORE_H
3
4/*
5 * Copyright (C) 1998-2000 Hewlett-Packard Co
6 * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9#include <linux/wait.h>
10#include <linux/rwsem.h>
11
12#include <asm/atomic.h>
13
14struct semaphore {
15 atomic_t count;
16 int sleepers;
17 wait_queue_head_t wait;
18};
19
20#define __SEMAPHORE_INITIALIZER(name, n) \
21{ \
22 .count = ATOMIC_INIT(n), \
23 .sleepers = 0, \
24 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
25}
26
27#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
28 struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
29
30#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
31
32static inline void
33sema_init (struct semaphore *sem, int val)
34{
35 *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
36}
37
38static inline void
39init_MUTEX (struct semaphore *sem)
40{
41 sema_init(sem, 1);
42}
43
44static inline void
45init_MUTEX_LOCKED (struct semaphore *sem)
46{
47 sema_init(sem, 0);
48}
49
50extern void __down (struct semaphore * sem);
51extern int __down_interruptible (struct semaphore * sem);
52extern int __down_trylock (struct semaphore * sem);
53extern void __up (struct semaphore * sem);
54
55/*
56 * Atomically decrement the semaphore's count. If it goes negative,
57 * block the calling thread in the TASK_UNINTERRUPTIBLE state.
58 */
59static inline void
60down (struct semaphore *sem)
61{
62 might_sleep();
63 if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
64 __down(sem);
65}
66
67/*
68 * Atomically decrement the semaphore's count. If it goes negative,
69 * block the calling thread in the TASK_INTERRUPTIBLE state.
70 */
71static inline int
72down_interruptible (struct semaphore * sem)
73{
74 int ret = 0;
75
76 might_sleep();
77 if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
78 ret = __down_interruptible(sem);
79 return ret;
80}
81
82static inline int
83down_trylock (struct semaphore *sem)
84{
85 int ret = 0;
86
87 if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
88 ret = __down_trylock(sem);
89 return ret;
90}
91
92static inline void
93up (struct semaphore * sem)
94{
95 if (ia64_fetchadd(1, &sem->count.counter, rel) <= -1)
96 __up(sem);
97}
98
99#endif /* _ASM_IA64_SEMAPHORE_H */
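
ia64 is the one port here that spells out memory ordering: the decrement in down() uses ia64_fetchadd(..., acq) and the increment in up() uses rel, so the critical section is bracketed by an acquire and a release and cannot leak past either end. A C11 sketch of the same discipline, mirroring the header's exact comparisons (illustrative only, not the ia64 intrinsics):

#include <stdatomic.h>

extern void __down(atomic_int *count);	/* hypothetical slow paths */
extern void __up(atomic_int *count);

static void down(atomic_int *count)
{
	/* acquire: later loads/stores stay inside the critical section */
	if (atomic_fetch_sub_explicit(count, 1, memory_order_acquire) < 1)
		__down(count);
}

static void up(atomic_int *count)
{
	/* release: earlier loads/stores complete before the count rises */
	if (atomic_fetch_add_explicit(count, 1, memory_order_release) <= -1)
		__up(count);
}
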
diff --git a/include/asm-m32r/semaphore.h b/include/asm-m32r/semaphore.h
index b5bf95a6f2b4..d9b2034ed1d2 100644
--- a/include/asm-m32r/semaphore.h
+++ b/include/asm-m32r/semaphore.h
@@ -1,144 +1 @@
1#ifndef _ASM_M32R_SEMAPHORE_H #include <linux/semaphore.h>
2#define _ASM_M32R_SEMAPHORE_H
3
4#include <linux/linkage.h>
5
6#ifdef __KERNEL__
7
8/*
9 * SMP- and interrupt-safe semaphores..
10 *
11 * Copyright (C) 1996 Linus Torvalds
12 * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
13 */
14
15#include <linux/wait.h>
16#include <linux/rwsem.h>
17#include <asm/assembler.h>
18#include <asm/system.h>
19#include <asm/atomic.h>
20
21struct semaphore {
22 atomic_t count;
23 int sleepers;
24 wait_queue_head_t wait;
25};
26
27#define __SEMAPHORE_INITIALIZER(name, n) \
28{ \
29 .count = ATOMIC_INIT(n), \
30 .sleepers = 0, \
31 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
32}
33
34#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
35 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
36
37#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
38
39static inline void sema_init (struct semaphore *sem, int val)
40{
41/*
42 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
43 *
44 * i'd rather use the more flexible initialization above, but sadly
45 * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
46 */
47 atomic_set(&sem->count, val);
48 sem->sleepers = 0;
49 init_waitqueue_head(&sem->wait);
50}
51
52static inline void init_MUTEX (struct semaphore *sem)
53{
54 sema_init(sem, 1);
55}
56
57static inline void init_MUTEX_LOCKED (struct semaphore *sem)
58{
59 sema_init(sem, 0);
60}
61
62asmlinkage void __down_failed(void /* special register calling convention */);
63asmlinkage int __down_failed_interruptible(void /* params in registers */);
64asmlinkage int __down_failed_trylock(void /* params in registers */);
65asmlinkage void __up_wakeup(void /* special register calling convention */);
66
67asmlinkage void __down(struct semaphore * sem);
68asmlinkage int __down_interruptible(struct semaphore * sem);
69asmlinkage int __down_trylock(struct semaphore * sem);
70asmlinkage void __up(struct semaphore * sem);
71
72/*
73 * Atomically decrement the semaphore's count. If it goes negative,
74 * block the calling thread in the TASK_UNINTERRUPTIBLE state.
75 */
76static inline void down(struct semaphore * sem)
77{
78 might_sleep();
79 if (unlikely(atomic_dec_return(&sem->count) < 0))
80 __down(sem);
81}
82
83/*
84 * Interruptible try to acquire a semaphore. If we obtained
85 * it, return zero. If we were interrupted, returns -EINTR
86 */
87static inline int down_interruptible(struct semaphore * sem)
88{
89 int result = 0;
90
91 might_sleep();
92 if (unlikely(atomic_dec_return(&sem->count) < 0))
93 result = __down_interruptible(sem);
94
95 return result;
96}
97
98/*
99 * Non-blockingly attempt to down() a semaphore.
100 * Returns zero if we acquired it
101 */
102static inline int down_trylock(struct semaphore * sem)
103{
104 unsigned long flags;
105 long count;
106 int result = 0;
107
108 local_irq_save(flags);
109 __asm__ __volatile__ (
110 "# down_trylock \n\t"
111 DCACHE_CLEAR("%0", "r4", "%1")
112 M32R_LOCK" %0, @%1; \n\t"
113 "addi %0, #-1; \n\t"
114 M32R_UNLOCK" %0, @%1; \n\t"
115 : "=&r" (count)
116 : "r" (&sem->count)
117 : "memory"
118#ifdef CONFIG_CHIP_M32700_TS1
119 , "r4"
120#endif /* CONFIG_CHIP_M32700_TS1 */
121 );
122 local_irq_restore(flags);
123
124 if (unlikely(count < 0))
125 result = __down_trylock(sem);
126
127 return result;
128}
129
130/*
131 * Note! This is subtle. We jump to wake people up only if
132 * the semaphore was negative (== somebody was waiting on it).
133 * The default case (no contention) will result in NO
134 * jumps for both down() and up().
135 */
136static inline void up(struct semaphore * sem)
137{
138 if (unlikely(atomic_inc_return(&sem->count) <= 0))
139 __up(sem);
140}
141
142#endif /* __KERNEL__ */
143
144#endif /* _ASM_M32R_SEMAPHORE_H */
diff --git a/include/asm-m68k/semaphore-helper.h b/include/asm-m68k/semaphore-helper.h
deleted file mode 100644
index eef30ba0b499..000000000000
--- a/include/asm-m68k/semaphore-helper.h
+++ /dev/null
@@ -1,142 +0,0 @@
1#ifndef _M68K_SEMAPHORE_HELPER_H
2#define _M68K_SEMAPHORE_HELPER_H
3
4/*
5 * SMP- and interrupt-safe semaphores helper functions.
6 *
7 * (C) Copyright 1996 Linus Torvalds
8 *
9 * m68k version by Andreas Schwab
10 */
11
12#include <linux/errno.h>
13
14/*
15 * These two _must_ execute atomically wrt each other.
16 */
17static inline void wake_one_more(struct semaphore * sem)
18{
19 atomic_inc(&sem->waking);
20}
21
22#ifndef CONFIG_RMW_INSNS
23extern spinlock_t semaphore_wake_lock;
24#endif
25
26static inline int waking_non_zero(struct semaphore *sem)
27{
28 int ret;
29#ifndef CONFIG_RMW_INSNS
30 unsigned long flags;
31
32 spin_lock_irqsave(&semaphore_wake_lock, flags);
33 ret = 0;
34 if (atomic_read(&sem->waking) > 0) {
35 atomic_dec(&sem->waking);
36 ret = 1;
37 }
38 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
39#else
40 int tmp1, tmp2;
41
42 __asm__ __volatile__
43 ("1: movel %1,%2\n"
44 " jle 2f\n"
45 " subql #1,%2\n"
46 " casl %1,%2,%3\n"
47 " jne 1b\n"
48 " moveq #1,%0\n"
49 "2:"
50 : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
51 : "m" (sem->waking), "0" (0), "1" (sem->waking));
52#endif
53
54 return ret;
55}
56
57/*
58 * waking_non_zero_interruptible:
59 * 1 got the lock
60 * 0 go to sleep
61 * -EINTR interrupted
62 */
63static inline int waking_non_zero_interruptible(struct semaphore *sem,
64 struct task_struct *tsk)
65{
66 int ret;
67#ifndef CONFIG_RMW_INSNS
68 unsigned long flags;
69
70 spin_lock_irqsave(&semaphore_wake_lock, flags);
71 ret = 0;
72 if (atomic_read(&sem->waking) > 0) {
73 atomic_dec(&sem->waking);
74 ret = 1;
75 } else if (signal_pending(tsk)) {
76 atomic_inc(&sem->count);
77 ret = -EINTR;
78 }
79 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
80#else
81 int tmp1, tmp2;
82
83 __asm__ __volatile__
84 ("1: movel %1,%2\n"
85 " jle 2f\n"
86 " subql #1,%2\n"
87 " casl %1,%2,%3\n"
88 " jne 1b\n"
89 " moveq #1,%0\n"
90 " jra %a4\n"
91 "2:"
92 : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
93 : "m" (sem->waking), "i" (&&next), "0" (0), "1" (sem->waking));
94 if (signal_pending(tsk)) {
95 atomic_inc(&sem->count);
96 ret = -EINTR;
97 }
98next:
99#endif
100
101 return ret;
102}
103
104/*
105 * waking_non_zero_trylock:
106 * 1 failed to lock
107 * 0 got the lock
108 */
109static inline int waking_non_zero_trylock(struct semaphore *sem)
110{
111 int ret;
112#ifndef CONFIG_RMW_INSNS
113 unsigned long flags;
114
115 spin_lock_irqsave(&semaphore_wake_lock, flags);
116 ret = 1;
117 if (atomic_read(&sem->waking) > 0) {
118 atomic_dec(&sem->waking);
119 ret = 0;
120 } else
121 atomic_inc(&sem->count);
122 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
123#else
124 int tmp1, tmp2;
125
126 __asm__ __volatile__
127 ("1: movel %1,%2\n"
128 " jle 2f\n"
129 " subql #1,%2\n"
130 " casl %1,%2,%3\n"
131 " jne 1b\n"
132 " moveq #0,%0\n"
133 "2:"
134 : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
135 : "m" (sem->waking), "0" (1), "1" (sem->waking));
136 if (ret)
137 atomic_inc(&sem->count);
138#endif
139 return ret;
140}
141
142#endif
diff --git a/include/asm-m68k/semaphore.h b/include/asm-m68k/semaphore.h
index 64d6b119bb0a..d9b2034ed1d2 100644
--- a/include/asm-m68k/semaphore.h
+++ b/include/asm-m68k/semaphore.h
@@ -1,163 +1 @@
1#ifndef _M68K_SEMAPHORE_H #include <linux/semaphore.h>
2#define _M68K_SEMAPHORE_H
3
4#define RW_LOCK_BIAS 0x01000000
5
6#ifndef __ASSEMBLY__
7
8#include <linux/linkage.h>
9#include <linux/wait.h>
10#include <linux/spinlock.h>
11#include <linux/rwsem.h>
12#include <linux/stringify.h>
13
14#include <asm/system.h>
15#include <asm/atomic.h>
16
17/*
18 * Interrupt-safe semaphores..
19 *
20 * (C) Copyright 1996 Linus Torvalds
21 *
22 * m68k version by Andreas Schwab
23 */
24
25
26struct semaphore {
27 atomic_t count;
28 atomic_t waking;
29 wait_queue_head_t wait;
30};
31
32#define __SEMAPHORE_INITIALIZER(name, n) \
33{ \
34 .count = ATOMIC_INIT(n), \
35 .waking = ATOMIC_INIT(0), \
36 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
37}
38
39#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
40 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
41
42#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
43
44static inline void sema_init(struct semaphore *sem, int val)
45{
46 *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
47}
48
49static inline void init_MUTEX (struct semaphore *sem)
50{
51 sema_init(sem, 1);
52}
53
54static inline void init_MUTEX_LOCKED (struct semaphore *sem)
55{
56 sema_init(sem, 0);
57}
58
59asmlinkage void __down_failed(void /* special register calling convention */);
60asmlinkage int __down_failed_interruptible(void /* params in registers */);
61asmlinkage int __down_failed_trylock(void /* params in registers */);
62asmlinkage void __up_wakeup(void /* special register calling convention */);
63
64asmlinkage void __down(struct semaphore * sem);
65asmlinkage int __down_interruptible(struct semaphore * sem);
66asmlinkage int __down_trylock(struct semaphore * sem);
67asmlinkage void __up(struct semaphore * sem);
68
69/*
70 * This is ugly, but we want the default case to fall through.
71 * "down_failed" is a special asm handler that calls the C
72 * routine that actually waits. See arch/m68k/lib/semaphore.S
73 */
74static inline void down(struct semaphore *sem)
75{
76 register struct semaphore *sem1 __asm__ ("%a1") = sem;
77
78 might_sleep();
79 __asm__ __volatile__(
80 "| atomic down operation\n\t"
81 "subql #1,%0@\n\t"
82 "jmi 2f\n\t"
83 "1:\n"
84 LOCK_SECTION_START(".even\n\t")
85 "2:\tpea 1b\n\t"
86 "jbra __down_failed\n"
87 LOCK_SECTION_END
88 : /* no outputs */
89 : "a" (sem1)
90 : "memory");
91}
92
93static inline int down_interruptible(struct semaphore *sem)
94{
95 register struct semaphore *sem1 __asm__ ("%a1") = sem;
96 register int result __asm__ ("%d0");
97
98 might_sleep();
99 __asm__ __volatile__(
100 "| atomic interruptible down operation\n\t"
101 "subql #1,%1@\n\t"
102 "jmi 2f\n\t"
103 "clrl %0\n"
104 "1:\n"
105 LOCK_SECTION_START(".even\n\t")
106 "2:\tpea 1b\n\t"
107 "jbra __down_failed_interruptible\n"
108 LOCK_SECTION_END
109 : "=d" (result)
110 : "a" (sem1)
111 : "memory");
112 return result;
113}
114
115static inline int down_trylock(struct semaphore *sem)
116{
117 register struct semaphore *sem1 __asm__ ("%a1") = sem;
118 register int result __asm__ ("%d0");
119
120 __asm__ __volatile__(
121 "| atomic down trylock operation\n\t"
122 "subql #1,%1@\n\t"
123 "jmi 2f\n\t"
124 "clrl %0\n"
125 "1:\n"
126 LOCK_SECTION_START(".even\n\t")
127 "2:\tpea 1b\n\t"
128 "jbra __down_failed_trylock\n"
129 LOCK_SECTION_END
130 : "=d" (result)
131 : "a" (sem1)
132 : "memory");
133 return result;
134}
135
136/*
137 * Note! This is subtle. We jump to wake people up only if
138 * the semaphore was negative (== somebody was waiting on it).
139 * The default case (no contention) will result in NO
140 * jumps for both down() and up().
141 */
142static inline void up(struct semaphore *sem)
143{
144 register struct semaphore *sem1 __asm__ ("%a1") = sem;
145
146 __asm__ __volatile__(
147 "| atomic up operation\n\t"
148 "addql #1,%0@\n\t"
149 "jle 2f\n"
150 "1:\n"
151 LOCK_SECTION_START(".even\n\t")
152 "2:\t"
153 "pea 1b\n\t"
154 "jbra __up_wakeup\n"
155 LOCK_SECTION_END
156 : /* no outputs */
157 : "a" (sem1)
158 : "memory");
159}
160
161#endif /* __ASSEMBLY__ */
162
163#endif
diff --git a/include/asm-m68knommu/semaphore-helper.h b/include/asm-m68knommu/semaphore-helper.h
deleted file mode 100644
index 43da7bc483c7..000000000000
--- a/include/asm-m68knommu/semaphore-helper.h
+++ /dev/null
@@ -1,82 +0,0 @@
1#ifndef _M68K_SEMAPHORE_HELPER_H
2#define _M68K_SEMAPHORE_HELPER_H
3
4/*
5 * SMP- and interrupt-safe semaphores helper functions.
6 *
7 * (C) Copyright 1996 Linus Torvalds
8 *
9 * m68k version by Andreas Schwab
10 */
11
12
13/*
14 * These two _must_ execute atomically wrt each other.
15 */
16static inline void wake_one_more(struct semaphore * sem)
17{
18 atomic_inc(&sem->waking);
19}
20
21static inline int waking_non_zero(struct semaphore *sem)
22{
23 int ret;
24 unsigned long flags;
25
26 spin_lock_irqsave(&semaphore_wake_lock, flags);
27 ret = 0;
28 if (atomic_read(&sem->waking) > 0) {
29 atomic_dec(&sem->waking);
30 ret = 1;
31 }
32 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
33 return ret;
34}
35
36/*
37 * waking_non_zero_interruptible:
38 * 1 got the lock
39 * 0 go to sleep
40 * -EINTR interrupted
41 */
42static inline int waking_non_zero_interruptible(struct semaphore *sem,
43 struct task_struct *tsk)
44{
45 int ret;
46 unsigned long flags;
47
48 spin_lock_irqsave(&semaphore_wake_lock, flags);
49 ret = 0;
50 if (atomic_read(&sem->waking) > 0) {
51 atomic_dec(&sem->waking);
52 ret = 1;
53 } else if (signal_pending(tsk)) {
54 atomic_inc(&sem->count);
55 ret = -EINTR;
56 }
57 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
58 return ret;
59}
60
61/*
62 * waking_non_zero_trylock:
63 * 1 failed to lock
64 * 0 got the lock
65 */
66static inline int waking_non_zero_trylock(struct semaphore *sem)
67{
68 int ret;
69 unsigned long flags;
70
71 spin_lock_irqsave(&semaphore_wake_lock, flags);
72 ret = 1;
73 if (atomic_read(&sem->waking) > 0) {
74 atomic_dec(&sem->waking);
75 ret = 0;
76 } else
77 atomic_inc(&sem->count);
78 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
79 return ret;
80}
81
82#endif
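
The helper protocol deleted above deserves a note: "waking" counts wakeup tokens handed out by up(), and a sleeper may only claim the semaphore after consuming one, so a signalled sleeper can back out with -EINTR without losing a wakeup. A simplified user-space model of the two core helpers — a pthread mutex stands in for semaphore_wake_lock, and every name is invented:

#include <pthread.h>

static pthread_mutex_t wake_lock = PTHREAD_MUTEX_INITIALIZER;

struct helper_sem {
	int count;	/* models sem->count */
	int waking;	/* models sem->waking */
};

static void wake_one_more(struct helper_sem *sem)
{
	pthread_mutex_lock(&wake_lock);
	sem->waking++;	/* the real code uses a bare atomic_inc() here */
	pthread_mutex_unlock(&wake_lock);
}

/* 1: this sleeper may claim the semaphore, 0: go back to sleep */
static int waking_non_zero(struct helper_sem *sem)
{
	int ret = 0;

	pthread_mutex_lock(&wake_lock);
	if (sem->waking > 0) {
		sem->waking--;
		ret = 1;
	}
	pthread_mutex_unlock(&wake_lock);
	return ret;
}
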
diff --git a/include/asm-m68knommu/semaphore.h b/include/asm-m68knommu/semaphore.h
index 5779eb6c0689..d9b2034ed1d2 100644
--- a/include/asm-m68knommu/semaphore.h
+++ b/include/asm-m68knommu/semaphore.h
@@ -1,153 +1 @@
1#ifndef _M68K_SEMAPHORE_H
+#include <linux/semaphore.h>
2#define _M68K_SEMAPHORE_H
3
4#define RW_LOCK_BIAS 0x01000000
5
6#ifndef __ASSEMBLY__
7
8#include <linux/linkage.h>
9#include <linux/wait.h>
10#include <linux/spinlock.h>
11#include <linux/rwsem.h>
12
13#include <asm/system.h>
14#include <asm/atomic.h>
15
16/*
17 * Interrupt-safe semaphores..
18 *
19 * (C) Copyright 1996 Linus Torvalds
20 *
21 * m68k version by Andreas Schwab
22 */
23
24
25struct semaphore {
26 atomic_t count;
27 atomic_t waking;
28 wait_queue_head_t wait;
29};
30
31#define __SEMAPHORE_INITIALIZER(name, n) \
32{ \
33 .count = ATOMIC_INIT(n), \
34 .waking = ATOMIC_INIT(0), \
35 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
36}
37
38#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
39 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
40
41#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
42
43static inline void sema_init (struct semaphore *sem, int val)
44{
45 *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
46}
47
48static inline void init_MUTEX (struct semaphore *sem)
49{
50 sema_init(sem, 1);
51}
52
53static inline void init_MUTEX_LOCKED (struct semaphore *sem)
54{
55 sema_init(sem, 0);
56}
57
58asmlinkage void __down_failed(void /* special register calling convention */);
59asmlinkage int __down_failed_interruptible(void /* params in registers */);
60asmlinkage int __down_failed_trylock(void /* params in registers */);
61asmlinkage void __up_wakeup(void /* special register calling convention */);
62
63asmlinkage void __down(struct semaphore * sem);
64asmlinkage int __down_interruptible(struct semaphore * sem);
65asmlinkage int __down_trylock(struct semaphore * sem);
66asmlinkage void __up(struct semaphore * sem);
67
68extern spinlock_t semaphore_wake_lock;
69
70/*
71 * This is ugly, but we want the default case to fall through.
72 * "down_failed" is a special asm handler that calls the C
73 * routine that actually waits. See arch/m68k/lib/semaphore.S
74 */
75static inline void down(struct semaphore * sem)
76{
77 might_sleep();
78 __asm__ __volatile__(
79 "| atomic down operation\n\t"
80 "movel %0, %%a1\n\t"
81 "lea %%pc@(1f), %%a0\n\t"
82 "subql #1, %%a1@\n\t"
83 "jmi __down_failed\n"
84 "1:"
85 : /* no outputs */
86 : "g" (sem)
87 : "cc", "%a0", "%a1", "memory");
88}
89
90static inline int down_interruptible(struct semaphore * sem)
91{
92 int ret;
93
94 might_sleep();
95 __asm__ __volatile__(
96 "| atomic down operation\n\t"
97 "movel %1, %%a1\n\t"
98 "lea %%pc@(1f), %%a0\n\t"
99 "subql #1, %%a1@\n\t"
100 "jmi __down_failed_interruptible\n\t"
101 "clrl %%d0\n"
102 "1: movel %%d0, %0\n"
103 : "=d" (ret)
104 : "g" (sem)
105 : "cc", "%d0", "%a0", "%a1", "memory");
106 return(ret);
107}
108
109static inline int down_trylock(struct semaphore * sem)
110{
111 register struct semaphore *sem1 __asm__ ("%a1") = sem;
112 register int result __asm__ ("%d0");
113
114 __asm__ __volatile__(
115 "| atomic down trylock operation\n\t"
116 "subql #1,%1@\n\t"
117 "jmi 2f\n\t"
118 "clrl %0\n"
119 "1:\n"
120 ".section .text.lock,\"ax\"\n"
121 ".even\n"
122 "2:\tpea 1b\n\t"
123 "jbra __down_failed_trylock\n"
124 ".previous"
125 : "=d" (result)
126 : "a" (sem1)
127 : "memory");
128 return result;
129}
130
131/*
132 * Note! This is subtle. We jump to wake people up only if
133 * the semaphore was negative (== somebody was waiting on it).
134 * The default case (no contention) will result in NO
135 * jumps for both down() and up().
136 */
137static inline void up(struct semaphore * sem)
138{
139 __asm__ __volatile__(
140 "| atomic up operation\n\t"
141 "movel %0, %%a1\n\t"
142 "lea %%pc@(1f), %%a0\n\t"
143 "addql #1, %%a1@\n\t"
144 "jle __up_wakeup\n"
145 "1:"
146 : /* no outputs */
147 : "g" (sem)
148 : "cc", "%a0", "%a1", "memory");
149}
150
151#endif /* __ASSEMBLY__ */
152
153#endif
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h
index fdf8042b784b..d9b2034ed1d2 100644
--- a/include/asm-mips/semaphore.h
+++ b/include/asm-mips/semaphore.h
@@ -1,108 +1 @@
1/*
+#include <linux/semaphore.h>
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1996 Linus Torvalds
7 * Copyright (C) 1998, 99, 2000, 01, 04 Ralf Baechle
8 * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
9 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
10 *
11 * In all honesty, little of the old MIPS code is left - the PPC64 variant was
12 * just looking nice and portable so I ripped it. Credits to whoever wrote
13 * it.
14 */
15#ifndef __ASM_SEMAPHORE_H
16#define __ASM_SEMAPHORE_H
17
18/*
19 * Remove spinlock-based RW semaphores; RW semaphore definitions are
20 * now in rwsem.h and we use the generic lib/rwsem.c implementation.
21 * Rework semaphores to use atomic_dec_if_positive.
22 * -- Paul Mackerras (paulus@samba.org)
23 */
24
25#ifdef __KERNEL__
26
27#include <asm/atomic.h>
28#include <asm/system.h>
29#include <linux/wait.h>
30#include <linux/rwsem.h>
31
32struct semaphore {
33 /*
34 * Note that any negative value of count is equivalent to 0,
35 * but additionally indicates that some process(es) might be
36 * sleeping on `wait'.
37 */
38 atomic_t count;
39 wait_queue_head_t wait;
40};
41
42#define __SEMAPHORE_INITIALIZER(name, n) \
43{ \
44 .count = ATOMIC_INIT(n), \
45 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
46}
47
48#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
49 struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
50
51#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
52
53static inline void sema_init(struct semaphore *sem, int val)
54{
55 atomic_set(&sem->count, val);
56 init_waitqueue_head(&sem->wait);
57}
58
59static inline void init_MUTEX(struct semaphore *sem)
60{
61 sema_init(sem, 1);
62}
63
64static inline void init_MUTEX_LOCKED(struct semaphore *sem)
65{
66 sema_init(sem, 0);
67}
68
69extern void __down(struct semaphore * sem);
70extern int __down_interruptible(struct semaphore * sem);
71extern void __up(struct semaphore * sem);
72
73static inline void down(struct semaphore * sem)
74{
75 might_sleep();
76
77 /*
78 * Try to get the semaphore, take the slow path if we fail.
79 */
80 if (unlikely(atomic_dec_return(&sem->count) < 0))
81 __down(sem);
82}
83
84static inline int down_interruptible(struct semaphore * sem)
85{
86 int ret = 0;
87
88 might_sleep();
89
90 if (unlikely(atomic_dec_return(&sem->count) < 0))
91 ret = __down_interruptible(sem);
92 return ret;
93}
94
95static inline int down_trylock(struct semaphore * sem)
96{
97 return atomic_dec_if_positive(&sem->count) < 0;
98}
99
100static inline void up(struct semaphore * sem)
101{
102 if (unlikely(atomic_inc_return(&sem->count) <= 0))
103 __up(sem);
104}
105
106#endif /* __KERNEL__ */
107
108#endif /* __ASM_SEMAPHORE_H */
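
The MIPS header above (and the identical PowerPC one further down) builds its trylock on atomic_dec_if_positive(), which decrements only when the result would stay non-negative. A C11 sketch of that primitive and the trylock built on it — illustrative user-space code, not the kernel's implementation:

#include <stdatomic.h>

/* returns old - 1; the store happens only when that result is >= 0 */
static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	while (old > 0 &&
	       !atomic_compare_exchange_weak_explicit(v, &old, old - 1,
						      memory_order_acquire,
						      memory_order_relaxed))
		;	/* a failed CAS reloads 'old'; retry while still positive */
	return old - 1;
}

/* kernel convention: nonzero means the semaphore was not acquired */
static int trylock_model(atomic_int *count)
{
	return dec_if_positive(count) < 0;
}
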
diff --git a/include/asm-mn10300/semaphore.h b/include/asm-mn10300/semaphore.h
index 5a9e1ad0b253..d9b2034ed1d2 100644
--- a/include/asm-mn10300/semaphore.h
+++ b/include/asm-mn10300/semaphore.h
@@ -1,169 +1 @@
1/* MN10300 Semaphores
+#include <linux/semaphore.h>
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#ifndef _ASM_SEMAPHORE_H
12#define _ASM_SEMAPHORE_H
13
14#ifndef __ASSEMBLY__
15
16#include <linux/linkage.h>
17#include <linux/wait.h>
18#include <linux/spinlock.h>
19#include <linux/rwsem.h>
20
21#define SEMAPHORE_DEBUG 0
22
23/*
24 * the semaphore definition
25 * - if count is >0 then there are tokens available on the semaphore for down
26 * to collect
27 * - if count is <=0 then there are no spare tokens, and anyone that wants one
28 * must wait
29 * - if wait_list is not empty, then there are processes waiting for the
30 * semaphore
31 */
32struct semaphore {
33 atomic_t count; /* it's not really atomic, it's
34 * just that certain modules
35 * expect to be able to access
36 * it directly */
37 spinlock_t wait_lock;
38 struct list_head wait_list;
39#if SEMAPHORE_DEBUG
40 unsigned __magic;
41#endif
42};
43
44#if SEMAPHORE_DEBUG
45# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
46#else
47# define __SEM_DEBUG_INIT(name)
48#endif
49
50
51#define __SEMAPHORE_INITIALIZER(name, init_count) \
52{ \
53 .count = ATOMIC_INIT(init_count), \
54 .wait_lock = __SPIN_LOCK_UNLOCKED((name).wait_lock), \
55 .wait_list = LIST_HEAD_INIT((name).wait_list) \
56 __SEM_DEBUG_INIT(name) \
57}
58
59#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
60 struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
61
62#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
63#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)
64
65static inline void sema_init(struct semaphore *sem, int val)
66{
67 *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
68}
69
70static inline void init_MUTEX(struct semaphore *sem)
71{
72 sema_init(sem, 1);
73}
74
75static inline void init_MUTEX_LOCKED(struct semaphore *sem)
76{
77 sema_init(sem, 0);
78}
79
80extern void __down(struct semaphore *sem, unsigned long flags);
81extern int __down_interruptible(struct semaphore *sem, unsigned long flags);
82extern void __up(struct semaphore *sem);
83
84static inline void down(struct semaphore *sem)
85{
86 unsigned long flags;
87 int count;
88
89#if SEMAPHORE_DEBUG
90 CHECK_MAGIC(sem->__magic);
91#endif
92
93 spin_lock_irqsave(&sem->wait_lock, flags);
94 count = atomic_read(&sem->count);
95 if (likely(count > 0)) {
96 atomic_set(&sem->count, count - 1);
97 spin_unlock_irqrestore(&sem->wait_lock, flags);
98 } else {
99 __down(sem, flags);
100 }
101}
102
103static inline int down_interruptible(struct semaphore *sem)
104{
105 unsigned long flags;
106 int count, ret = 0;
107
108#if SEMAPHORE_DEBUG
109 CHECK_MAGIC(sem->__magic);
110#endif
111
112 spin_lock_irqsave(&sem->wait_lock, flags);
113 count = atomic_read(&sem->count);
114 if (likely(count > 0)) {
115 atomic_set(&sem->count, count - 1);
116 spin_unlock_irqrestore(&sem->wait_lock, flags);
117 } else {
118 ret = __down_interruptible(sem, flags);
119 }
120 return ret;
121}
122
123/*
124 * non-blockingly attempt to down() a semaphore.
125 * - returns zero if we acquired it
126 */
127static inline int down_trylock(struct semaphore *sem)
128{
129 unsigned long flags;
130 int count, success = 0;
131
132#if SEMAPHORE_DEBUG
133 CHECK_MAGIC(sem->__magic);
134#endif
135
136 spin_lock_irqsave(&sem->wait_lock, flags);
137 count = atomic_read(&sem->count);
138 if (likely(count > 0)) {
139 atomic_set(&sem->count, count - 1);
140 success = 1;
141 }
142 spin_unlock_irqrestore(&sem->wait_lock, flags);
143 return !success;
144}
145
146static inline void up(struct semaphore *sem)
147{
148 unsigned long flags;
149
150#if SEMAPHORE_DEBUG
151 CHECK_MAGIC(sem->__magic);
152#endif
153
154 spin_lock_irqsave(&sem->wait_lock, flags);
155 if (!list_empty(&sem->wait_list))
156 __up(sem);
157 else
158 atomic_set(&sem->count, atomic_read(&sem->count) + 1);
159 spin_unlock_irqrestore(&sem->wait_lock, flags);
160}
161
162static inline int sem_getcount(struct semaphore *sem)
163{
164 return atomic_read(&sem->count);
165}
166
167#endif /* __ASSEMBLY__ */
168
169#endif
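
Unlike the atomic-counter variants, the mn10300 implementation above serializes every operation on wait_lock, so the count never goes negative and trylock is a plain test under the lock. A compact user-space analogue with a pthread mutex in place of the irq-safe spinlock (names invented):

#include <pthread.h>

struct locked_sem {
	pthread_mutex_t lock;	/* stands in for sem->wait_lock */
	int count;
};

/* 0: token taken, 1: would have to sleep (the !success convention) */
static int trylock_model(struct locked_sem *sem)
{
	int success = 0;

	pthread_mutex_lock(&sem->lock);
	if (sem->count > 0) {
		sem->count--;
		success = 1;
	}
	pthread_mutex_unlock(&sem->lock);
	return !success;
}
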
diff --git a/include/asm-parisc/semaphore-helper.h b/include/asm-parisc/semaphore-helper.h
deleted file mode 100644
index 387f7c1277a2..000000000000
--- a/include/asm-parisc/semaphore-helper.h
+++ /dev/null
@@ -1,89 +0,0 @@
1#ifndef _ASM_PARISC_SEMAPHORE_HELPER_H
2#define _ASM_PARISC_SEMAPHORE_HELPER_H
3
4/*
5 * SMP- and interrupt-safe semaphores helper functions.
6 *
7 * (C) Copyright 1996 Linus Torvalds
8 * (C) Copyright 1999 Andrea Arcangeli
9 */
10
11/*
12 * These two _must_ execute atomically wrt each other.
13 *
14 * This is trivially done with load_locked/store_cond,
15 * which we have. Let the rest of the losers suck eggs.
16 */
17static __inline__ void wake_one_more(struct semaphore * sem)
18{
19 atomic_inc((atomic_t *)&sem->waking);
20}
21
22static __inline__ int waking_non_zero(struct semaphore *sem)
23{
24 unsigned long flags;
25 int ret = 0;
26
27 spin_lock_irqsave(&semaphore_wake_lock, flags);
28 if (sem->waking > 0) {
29 sem->waking--;
30 ret = 1;
31 }
32 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
33 return ret;
34}
35
36/*
37 * waking_non_zero_interruptible:
38 * 1 got the lock
39 * 0 go to sleep
40 * -EINTR interrupted
41 *
42 * We must undo the sem->count down_interruptible() increment while we are
43 * protected by the spinlock in order to make atomic this atomic_inc() with the
44 * atomic_read() in wake_one_more(), otherwise we can race. -arca
45 */
46static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
47 struct task_struct *tsk)
48{
49 unsigned long flags;
50 int ret = 0;
51
52 spin_lock_irqsave(&semaphore_wake_lock, flags);
53 if (sem->waking > 0) {
54 sem->waking--;
55 ret = 1;
56 } else if (signal_pending(tsk)) {
57 atomic_inc(&sem->count);
58 ret = -EINTR;
59 }
60 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
61 return ret;
62}
63
64/*
65 * waking_non_zero_trylock:
66 * 1 failed to lock
67 * 0 got the lock
68 *
69 * We must undo the sem->count down_trylock() increment while we are
70 * protected by the spinlock in order to make atomic this atomic_inc() with the
71 * atomic_read() in wake_one_more(), otherwise we can race. -arca
72 */
73static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
74{
75 unsigned long flags;
76 int ret = 1;
77
78 spin_lock_irqsave(&semaphore_wake_lock, flags);
79 if (sem->waking <= 0)
80 atomic_inc(&sem->count);
81 else {
82 sem->waking--;
83 ret = 0;
84 }
85 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
86 return ret;
87}
88
89#endif /* _ASM_PARISC_SEMAPHORE_HELPER_H */
diff --git a/include/asm-parisc/semaphore.h b/include/asm-parisc/semaphore.h
index a16271cdc748..d9b2034ed1d2 100644
--- a/include/asm-parisc/semaphore.h
+++ b/include/asm-parisc/semaphore.h
@@ -1,145 +1 @@
1/* SMP- and interrupt-safe semaphores.
+#include <linux/semaphore.h>
2 * PA-RISC version by Matthew Wilcox
3 *
4 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
5 * Copyright (C) 1996 Linus Torvalds
6 * Copyright (C) 1999-2001 Matthew Wilcox < willy at debian d0T org >
7 * Copyright (C) 2000 Grant Grundler < grundler a debian org >
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#ifndef _ASM_PARISC_SEMAPHORE_H
25#define _ASM_PARISC_SEMAPHORE_H
26
27#include <linux/spinlock.h>
28#include <linux/wait.h>
29#include <linux/rwsem.h>
30
31#include <asm/system.h>
32
33/*
34 * The `count' is initialised to the number of people who are allowed to
35 * take the lock. (Normally we want a mutex, so this is `1'). if
36 * `count' is positive, the lock can be taken. if it's 0, no-one is
37 * waiting on it. if it's -1, at least one task is waiting.
38 */
39struct semaphore {
40 spinlock_t sentry;
41 int count;
42 wait_queue_head_t wait;
43};
44
45#define __SEMAPHORE_INITIALIZER(name, n) \
46{ \
47 .sentry = SPIN_LOCK_UNLOCKED, \
48 .count = n, \
49 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
50}
51
52#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
53 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
54
55#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
56
57static inline void sema_init (struct semaphore *sem, int val)
58{
59 *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
60}
61
62static inline void init_MUTEX (struct semaphore *sem)
63{
64 sema_init(sem, 1);
65}
66
67static inline void init_MUTEX_LOCKED (struct semaphore *sem)
68{
69 sema_init(sem, 0);
70}
71
72static inline int sem_getcount(struct semaphore *sem)
73{
74 return sem->count;
75}
76
77asmlinkage void __down(struct semaphore * sem);
78asmlinkage int __down_interruptible(struct semaphore * sem);
79asmlinkage void __up(struct semaphore * sem);
80
81/* Semaphores can be `tried' from irq context. So we have to disable
82 * interrupts while we're messing with the semaphore. Sorry.
83 */
84
85static inline void down(struct semaphore * sem)
86{
87 might_sleep();
88 spin_lock_irq(&sem->sentry);
89 if (sem->count > 0) {
90 sem->count--;
91 } else {
92 __down(sem);
93 }
94 spin_unlock_irq(&sem->sentry);
95}
96
97static inline int down_interruptible(struct semaphore * sem)
98{
99 int ret = 0;
100 might_sleep();
101 spin_lock_irq(&sem->sentry);
102 if (sem->count > 0) {
103 sem->count--;
104 } else {
105 ret = __down_interruptible(sem);
106 }
107 spin_unlock_irq(&sem->sentry);
108 return ret;
109}
110
111/*
112 * down_trylock returns 0 on success, 1 if we failed to get the lock.
113 * May not sleep, but must preserve irq state
114 */
115static inline int down_trylock(struct semaphore * sem)
116{
117 unsigned long flags;
118 int count;
119
120 spin_lock_irqsave(&sem->sentry, flags);
121 count = sem->count - 1;
122 if (count >= 0)
123 sem->count = count;
124 spin_unlock_irqrestore(&sem->sentry, flags);
125 return (count < 0);
126}
127
128/*
129 * Note! This is subtle. We jump to wake people up only if
130 * the semaphore was negative (== somebody was waiting on it).
131 */
132static inline void up(struct semaphore * sem)
133{
134 unsigned long flags;
135
136 spin_lock_irqsave(&sem->sentry, flags);
137 if (sem->count < 0) {
138 __up(sem);
139 } else {
140 sem->count++;
141 }
142 spin_unlock_irqrestore(&sem->sentry, flags);
143}
144
145#endif /* _ASM_PARISC_SEMAPHORE_H */
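
The return convention documented in the parisc comment above — down_trylock() yields 0 on success, 1 on failure — is the kernel-wide one, so callers negate it. A usage sketch; do_work() and the error value are placeholders:

static int try_do_work(struct semaphore *sem)
{
	if (down_trylock(sem))
		return -EAGAIN;		/* nonzero: failed to get the lock */
	do_work();			/* hypothetical critical section */
	up(sem);
	return 0;
}
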
diff --git a/include/asm-powerpc/semaphore.h b/include/asm-powerpc/semaphore.h
index 48dd32e07749..d9b2034ed1d2 100644
--- a/include/asm-powerpc/semaphore.h
+++ b/include/asm-powerpc/semaphore.h
@@ -1,94 +1 @@
1#ifndef _ASM_POWERPC_SEMAPHORE_H
+#include <linux/semaphore.h>
2#define _ASM_POWERPC_SEMAPHORE_H
3
4/*
5 * Remove spinlock-based RW semaphores; RW semaphore definitions are
6 * now in rwsem.h and we use the generic lib/rwsem.c implementation.
7 * Rework semaphores to use atomic_dec_if_positive.
8 * -- Paul Mackerras (paulus@samba.org)
9 */
10
11#ifdef __KERNEL__
12
13#include <asm/atomic.h>
14#include <asm/system.h>
15#include <linux/wait.h>
16#include <linux/rwsem.h>
17
18struct semaphore {
19 /*
20 * Note that any negative value of count is equivalent to 0,
21 * but additionally indicates that some process(es) might be
22 * sleeping on `wait'.
23 */
24 atomic_t count;
25 wait_queue_head_t wait;
26};
27
28#define __SEMAPHORE_INITIALIZER(name, n) \
29{ \
30 .count = ATOMIC_INIT(n), \
31 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
32}
33
34#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
35 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
36
37#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
38
39static inline void sema_init (struct semaphore *sem, int val)
40{
41 atomic_set(&sem->count, val);
42 init_waitqueue_head(&sem->wait);
43}
44
45static inline void init_MUTEX (struct semaphore *sem)
46{
47 sema_init(sem, 1);
48}
49
50static inline void init_MUTEX_LOCKED (struct semaphore *sem)
51{
52 sema_init(sem, 0);
53}
54
55extern void __down(struct semaphore * sem);
56extern int __down_interruptible(struct semaphore * sem);
57extern void __up(struct semaphore * sem);
58
59static inline void down(struct semaphore * sem)
60{
61 might_sleep();
62
63 /*
64 * Try to get the semaphore, take the slow path if we fail.
65 */
66 if (unlikely(atomic_dec_return(&sem->count) < 0))
67 __down(sem);
68}
69
70static inline int down_interruptible(struct semaphore * sem)
71{
72 int ret = 0;
73
74 might_sleep();
75
76 if (unlikely(atomic_dec_return(&sem->count) < 0))
77 ret = __down_interruptible(sem);
78 return ret;
79}
80
81static inline int down_trylock(struct semaphore * sem)
82{
83 return atomic_dec_if_positive(&sem->count) < 0;
84}
85
86static inline void up(struct semaphore * sem)
87{
88 if (unlikely(atomic_inc_return(&sem->count) <= 0))
89 __up(sem);
90}
91
92#endif /* __KERNEL__ */
93
94#endif /* _ASM_POWERPC_SEMAPHORE_H */
diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h
index 123b557c3ff4..0818ecd30ca6 100644
--- a/include/asm-s390/cio.h
+++ b/include/asm-s390/cio.h
@@ -397,6 +397,10 @@ struct cio_iplinfo {
 
 extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo);
 
+/* Function from drivers/s390/cio/chsc.c */
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl);
+int chsc_sstpi(void *page, void *result, size_t size);
+
 #endif
 
 #endif
diff --git a/include/asm-s390/cpu.h b/include/asm-s390/cpu.h
index 352dde194f3c..e5a6a9ba3adf 100644
--- a/include/asm-s390/cpu.h
+++ b/include/asm-s390/cpu.h
@@ -22,4 +22,12 @@ struct s390_idle_data {
 
 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
 
+void s390_idle_leave(void);
+
+static inline void s390_idle_check(void)
+{
+	if ((&__get_cpu_var(s390_idle))->in_idle)
+		s390_idle_leave();
+}
+
 #endif /* _ASM_S390_CPU_H_ */
diff --git a/include/asm-s390/debug.h b/include/asm-s390/debug.h
index c00dd2b3dc50..335baf4fc64f 100644
--- a/include/asm-s390/debug.h
+++ b/include/asm-s390/debug.h
@@ -73,6 +73,7 @@ typedef struct debug_info {
 	struct dentry* debugfs_entries[DEBUG_MAX_VIEWS];
 	struct debug_view* views[DEBUG_MAX_VIEWS];
 	char name[DEBUG_MAX_NAME_LEN];
+	mode_t mode;
 } debug_info_t;
 
 typedef int (debug_header_proc_t) (debug_info_t* id,
@@ -122,6 +123,10 @@ debug_entry_t* debug_exception_common(debug_info_t* id, int level,
 debug_info_t* debug_register(char* name, int pages, int nr_areas,
 			     int buf_size);
 
+debug_info_t *debug_register_mode(char *name, int pages, int nr_areas,
+				  int buf_size, mode_t mode, uid_t uid,
+				  gid_t gid);
+
 void debug_unregister(debug_info_t* id);
 
 void debug_set_level(debug_info_t* id, int new_level);
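
The new debug_register_mode() declared above extends debug_register() with explicit mode/uid/gid for the debugfs entries. A sketch of how a driver might use it to create a root-only (0600) debug log; the name "mydrv" and the sizes are made up:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <asm/debug.h>

static debug_info_t *mydrv_dbf;

static int __init mydrv_debug_init(void)
{
	/* 2 pages per area, 1 area, 16 bytes of data per entry */
	mydrv_dbf = debug_register_mode("mydrv", 2, 1, 16,
					S_IRUSR | S_IWUSR, 0, 0);
	if (!mydrv_dbf)
		return -ENOMEM;
	debug_set_level(mydrv_dbf, 3);
	return 0;
}
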
diff --git a/include/asm-s390/extmem.h b/include/asm-s390/extmem.h
index c8802c934b74..33837d756184 100644
--- a/include/asm-s390/extmem.h
+++ b/include/asm-s390/extmem.h
@@ -22,11 +22,12 @@
 #define SEGMENT_SHARED 0
 #define SEGMENT_EXCLUSIVE 1
 
-extern int segment_load (char *name,int segtype,unsigned long *addr,unsigned long *length);
-extern void segment_unload(char *name);
-extern void segment_save(char *name);
-extern int segment_type (char* name);
-extern int segment_modify_shared (char *name, int do_nonshared);
+int segment_load (char *name, int segtype, unsigned long *addr, unsigned long *length);
+void segment_unload(char *name);
+void segment_save(char *name);
+int segment_type (char* name);
+int segment_modify_shared (char *name, int do_nonshared);
+void segment_warning(int rc, char *seg_name);
 
 #endif
 #endif
diff --git a/include/asm-s390/hardirq.h b/include/asm-s390/hardirq.h
index 31beb18cb3d1..4b7cb964ff35 100644
--- a/include/asm-s390/hardirq.h
+++ b/include/asm-s390/hardirq.h
@@ -32,6 +32,6 @@ typedef struct {
 
 #define HARDIRQ_BITS 8
 
-extern void account_ticks(u64 time);
+void clock_comparator_work(void);
 
 #endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 801a6fd35b5b..5de3efb31445 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -56,6 +56,8 @@
 #define __LC_IO_INT_WORD 0x0C0
 #define __LC_MCCK_CODE 0x0E8
 
+#define __LC_LAST_BREAK 0x110
+
 #define __LC_RETURN_PSW 0x200
 
 #define __LC_SAVE_AREA 0xC00
@@ -80,7 +82,6 @@
 #define __LC_CPUID 0xC60
 #define __LC_CPUADDR 0xC68
 #define __LC_IPLDEV 0xC7C
-#define __LC_JIFFY_TIMER 0xC80
 #define __LC_CURRENT 0xC90
 #define __LC_INT_CLOCK 0xC98
 #else /* __s390x__ */
@@ -103,7 +104,6 @@
 #define __LC_CPUID 0xD80
 #define __LC_CPUADDR 0xD88
 #define __LC_IPLDEV 0xDB8
-#define __LC_JIFFY_TIMER 0xDC0
 #define __LC_CURRENT 0xDD8
 #define __LC_INT_CLOCK 0xDE8
 #endif /* __s390x__ */
@@ -276,7 +276,7 @@ struct _lowcore
 	/* entry.S sensitive area end */
 
 	/* SMP info area: defined by DJB */
-	__u64 jiffy_timer;	/* 0xc80 */
+	__u64 clock_comparator;	/* 0xc80 */
 	__u32 ext_call_fast;	/* 0xc88 */
 	__u32 percpu_offset;	/* 0xc8c */
 	__u32 current_task;	/* 0xc90 */
@@ -368,11 +368,12 @@ struct _lowcore
 	/* entry.S sensitive area end */
 
 	/* SMP info area: defined by DJB */
-	__u64 jiffy_timer;	/* 0xdc0 */
+	__u64 clock_comparator;	/* 0xdc0 */
 	__u64 ext_call_fast;	/* 0xdc8 */
 	__u64 percpu_offset;	/* 0xdd0 */
 	__u64 current_task;	/* 0xdd8 */
-	__u64 softirq_pending;	/* 0xde0 */
+	__u32 softirq_pending;	/* 0xde0 */
+	__u32 pad_0x0de4;	/* 0xde4 */
 	__u64 int_clock;	/* 0xde8 */
 	__u8 pad12[0xe00-0xdf0]; /* 0xdf0 */
 
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 51d88912aa20..8eaf343a12a8 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -175,6 +175,13 @@ extern void task_show_regs(struct seq_file *m, struct task_struct *task);
 extern void show_registers(struct pt_regs *regs);
 extern void show_code(struct pt_regs *regs);
 extern void show_trace(struct task_struct *task, unsigned long *sp);
+#ifdef CONFIG_64BIT
+extern void show_last_breaking_event(struct pt_regs *regs);
+#else
+static inline void show_last_breaking_event(struct pt_regs *regs)
+{
+}
+#endif
 
 unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *) \
diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h
index 0e7001ad8392..d9b2034ed1d2 100644
--- a/include/asm-s390/semaphore.h
+++ b/include/asm-s390/semaphore.h
@@ -1,107 +1 @@
1/*
+#include <linux/semaphore.h>
2 * include/asm-s390/semaphore.h
3 *
4 * S390 version
5 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 *
7 * Derived from "include/asm-i386/semaphore.h"
8 * (C) Copyright 1996 Linus Torvalds
9 */
10
11#ifndef _S390_SEMAPHORE_H
12#define _S390_SEMAPHORE_H
13
14#include <asm/system.h>
15#include <asm/atomic.h>
16#include <linux/wait.h>
17#include <linux/rwsem.h>
18
19struct semaphore {
20 /*
21 * Note that any negative value of count is equivalent to 0,
22 * but additionally indicates that some process(es) might be
23 * sleeping on `wait'.
24 */
25 atomic_t count;
26 wait_queue_head_t wait;
27};
28
29#define __SEMAPHORE_INITIALIZER(name,count) \
30 { ATOMIC_INIT(count), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
31
32#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
33 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
34
35#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
36
37static inline void sema_init (struct semaphore *sem, int val)
38{
39 atomic_set(&sem->count, val);
40 init_waitqueue_head(&sem->wait);
41}
42
43static inline void init_MUTEX (struct semaphore *sem)
44{
45 sema_init(sem, 1);
46}
47
48static inline void init_MUTEX_LOCKED (struct semaphore *sem)
49{
50 sema_init(sem, 0);
51}
52
53asmlinkage void __down(struct semaphore * sem);
54asmlinkage int __down_interruptible(struct semaphore * sem);
55asmlinkage int __down_trylock(struct semaphore * sem);
56asmlinkage void __up(struct semaphore * sem);
57
58static inline void down(struct semaphore * sem)
59{
60 might_sleep();
61 if (atomic_dec_return(&sem->count) < 0)
62 __down(sem);
63}
64
65static inline int down_interruptible(struct semaphore * sem)
66{
67 int ret = 0;
68
69 might_sleep();
70 if (atomic_dec_return(&sem->count) < 0)
71 ret = __down_interruptible(sem);
72 return ret;
73}
74
75static inline int down_trylock(struct semaphore * sem)
76{
77 int old_val, new_val;
78
79 /*
80 * This inline assembly atomically implements the equivalent
81 * to the following C code:
82 * old_val = sem->count.counter;
83 * if ((new_val = old_val) > 0)
84 * sem->count.counter = --new_val;
85 * In the ppc code this is called atomic_dec_if_positive.
86 */
87 asm volatile(
88 " l %0,0(%3)\n"
89 "0: ltr %1,%0\n"
90 " jle 1f\n"
91 " ahi %1,-1\n"
92 " cs %0,%1,0(%3)\n"
93 " jl 0b\n"
94 "1:"
95 : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter)
96 : "a" (&sem->count.counter), "m" (sem->count.counter)
97 : "cc", "memory");
98 return old_val <= 0;
99}
100
101static inline void up(struct semaphore * sem)
102{
103 if (atomic_inc_return(&sem->count) <= 0)
104 __up(sem);
105}
106
107#endif
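
One detail worth spelling out in the up() path above: the wakeup runs only when the incremented count is still <= 0, because each sleeper leaves the count one below zero. A tiny runnable trace of the counter protocol (single-threaded and purely illustrative):

#include <assert.h>

/* up() modelled as an increment that reports whether a wakeup is due */
static int up_needs_wakeup(int *count)
{
	return ++*count <= 0;
}

int main(void)
{
	int count = 1;		/* a DECLARE_MUTEX starts at 1 */

	count--;		/* task A: 1 -> 0, fast path, owns it */
	count--;		/* task B: 0 -> -1, sleeps in __down() */
	count--;		/* task C: -1 -> -2, sleeps in __down() */
	assert(up_needs_wakeup(&count));	/* A releases: -2 -> -1, wake B */
	assert(up_needs_wakeup(&count));	/* B releases: -1 -> 0, wake C */
	assert(!up_needs_wakeup(&count));	/* C releases: 0 -> 1, no waiters */
	return 0;
}
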
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index c7b74326a527..6f3821a6a902 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -90,6 +90,9 @@ extern void __cpu_die (unsigned int cpu);
 extern void cpu_die (void) __attribute__ ((noreturn));
 extern int __cpu_up (unsigned int cpu);
 
+extern struct mutex smp_cpu_state_mutex;
+extern int smp_cpu_polarization[];
+
 extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
 				  void *info, int wait);
 #endif
diff --git a/include/asm-s390/sysinfo.h b/include/asm-s390/sysinfo.h
new file mode 100644
index 000000000000..abe10ae15e46
--- /dev/null
+++ b/include/asm-s390/sysinfo.h
@@ -0,0 +1,116 @@
1/*
2 * definition for store system information stsi
3 *
4 * Copyright IBM Corp. 2001,2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Ulrich Weigand <weigand@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 */
13
14struct sysinfo_1_1_1 {
15 char reserved_0[32];
16 char manufacturer[16];
17 char type[4];
18 char reserved_1[12];
19 char model_capacity[16];
20 char sequence[16];
21 char plant[4];
22 char model[16];
23 char model_perm_cap[16];
24 char model_temp_cap[16];
25 char model_cap_rating[4];
26 char model_perm_cap_rating[4];
27 char model_temp_cap_rating[4];
28};
29
30struct sysinfo_1_2_1 {
31 char reserved_0[80];
32 char sequence[16];
33 char plant[4];
34 char reserved_1[2];
35 unsigned short cpu_address;
36};
37
38struct sysinfo_1_2_2 {
39 char format;
40 char reserved_0[1];
41 unsigned short acc_offset;
42 char reserved_1[24];
43 unsigned int secondary_capability;
44 unsigned int capability;
45 unsigned short cpus_total;
46 unsigned short cpus_configured;
47 unsigned short cpus_standby;
48 unsigned short cpus_reserved;
49 unsigned short adjustment[0];
50};
51
52struct sysinfo_1_2_2_extension {
53 unsigned int alt_capability;
54 unsigned short alt_adjustment[0];
55};
56
57struct sysinfo_2_2_1 {
58 char reserved_0[80];
59 char sequence[16];
60 char plant[4];
61 unsigned short cpu_id;
62 unsigned short cpu_address;
63};
64
65struct sysinfo_2_2_2 {
66 char reserved_0[32];
67 unsigned short lpar_number;
68 char reserved_1;
69 unsigned char characteristics;
70 unsigned short cpus_total;
71 unsigned short cpus_configured;
72 unsigned short cpus_standby;
73 unsigned short cpus_reserved;
74 char name[8];
75 unsigned int caf;
76 char reserved_2[16];
77 unsigned short cpus_dedicated;
78 unsigned short cpus_shared;
79};
80
81#define LPAR_CHAR_DEDICATED (1 << 7)
82#define LPAR_CHAR_SHARED (1 << 6)
83#define LPAR_CHAR_LIMITED (1 << 5)
84
85struct sysinfo_3_2_2 {
86 char reserved_0[31];
87 unsigned char count;
88 struct {
89 char reserved_0[4];
90 unsigned short cpus_total;
91 unsigned short cpus_configured;
92 unsigned short cpus_standby;
93 unsigned short cpus_reserved;
94 char name[8];
95 unsigned int caf;
96 char cpi[16];
97 char reserved_1[24];
98
99 } vm[8];
100};
101
102static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
103{
104 register int r0 asm("0") = (fc << 28) | sel1;
105 register int r1 asm("1") = sel2;
106
107 asm volatile(
108 " stsi 0(%2)\n"
109 "0: jz 2f\n"
110 "1: lhi %0,%3\n"
111 "2:\n"
112 EX_TABLE(0b, 1b)
113 : "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
114 : "cc", "memory");
115 return r0;
116}
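
A usage sketch for the stsi() wrapper above, reading the basic machine description (function code 1, selectors 1.1); kernel context is assumed, and the buffer must be a full zeroed page since STSI stores a 4K block:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

static void print_machine_type(void)
{
	struct sysinfo_1_1_1 *info;

	info = (struct sysinfo_1_1_1 *) get_zeroed_page(GFP_KERNEL);
	if (!info)
		return;
	if (stsi(info, 1, 1, 1) != -ENOSYS)
		printk(KERN_INFO "machine type: %.4s\n", info->type);
	free_page((unsigned long) info);
}
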
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 15aba30601a3..92098df4d6e3 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -406,6 +406,8 @@ __set_psw_mask(unsigned long mask)
 #define local_mcck_enable() __set_psw_mask(psw_kernel_bits)
 #define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
 
+int stfle(unsigned long long *list, int doublewords);
+
 #ifdef CONFIG_SMP
 
 extern void smp_ctl_set_bit(int cr, int bit);
diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h
index 98229db24314..d744c3d62de5 100644
--- a/include/asm-s390/timex.h
+++ b/include/asm-s390/timex.h
@@ -62,16 +62,18 @@ static inline unsigned long long get_clock (void)
 	return clk;
 }
 
-static inline void get_clock_extended(void *dest)
+static inline unsigned long long get_clock_xt(void)
 {
-	typedef struct { unsigned long long clk[2]; } __clock_t;
+	unsigned char clk[16];
 
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-	asm volatile("stcke %0" : "=Q" (*((__clock_t *)dest)) : : "cc");
+	asm volatile("stcke %0" : "=Q" (clk) : : "cc");
 #else /* __GNUC__ */
-	asm volatile("stcke 0(%1)" : "=m" (*((__clock_t *)dest))
-		     : "a" ((__clock_t *)dest) : "cc");
+	asm volatile("stcke 0(%1)" : "=m" (clk)
+		     : "a" (clk) : "cc");
 #endif /* __GNUC__ */
+
+	return *((unsigned long long *)&clk[1]);
 }
 
 static inline cycles_t get_cycles(void)
@@ -81,5 +83,6 @@ static inline cycles_t get_cycles(void)
 
 int get_sync_clock(unsigned long long *clock);
 void init_cpu_timer(void);
+unsigned long long monotonic_clock(void);
 
 #endif
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 35fb4f9127b2..9e57a93d7de1 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -13,12 +13,14 @@ static inline void __tlb_flush_local(void)
13 asm volatile("ptlb" : : : "memory"); 13 asm volatile("ptlb" : : : "memory");
14} 14}
15 15
16#ifdef CONFIG_SMP
16/* 17/*
17 * Flush all tlb entries on all cpus. 18 * Flush all tlb entries on all cpus.
18 */ 19 */
20void smp_ptlb_all(void);
21
19static inline void __tlb_flush_global(void) 22static inline void __tlb_flush_global(void)
20{ 23{
21 extern void smp_ptlb_all(void);
22 register unsigned long reg2 asm("2"); 24 register unsigned long reg2 asm("2");
23 register unsigned long reg3 asm("3"); 25 register unsigned long reg3 asm("3");
24 register unsigned long reg4 asm("4"); 26 register unsigned long reg4 asm("4");
@@ -39,6 +41,25 @@ static inline void __tlb_flush_global(void)
39 : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" ); 41 : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
40} 42}
41 43
44static inline void __tlb_flush_full(struct mm_struct *mm)
45{
46 cpumask_t local_cpumask;
47
48 preempt_disable();
49 /*
50 * If the process only ran on the local cpu, do a local flush.
51 */
52 local_cpumask = cpumask_of_cpu(smp_processor_id());
53 if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
54 __tlb_flush_local();
55 else
56 __tlb_flush_global();
57 preempt_enable();
58}
59#else
60#define __tlb_flush_full(mm) __tlb_flush_local()
61#endif
62
42/* 63/*
43 * Flush all tlb entries of a page table on all cpus. 64 * Flush all tlb entries of a page table on all cpus.
44 */ 65 */
@@ -51,8 +72,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
51 72
52static inline void __tlb_flush_mm(struct mm_struct * mm) 73static inline void __tlb_flush_mm(struct mm_struct * mm)
53{ 74{
54 cpumask_t local_cpumask;
55
56 if (unlikely(cpus_empty(mm->cpu_vm_mask))) 75 if (unlikely(cpus_empty(mm->cpu_vm_mask)))
57 return; 76 return;
58 /* 77 /*
@@ -69,16 +88,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
69 mm->context.asce_bits); 88 mm->context.asce_bits);
70 return; 89 return;
71 } 90 }
72 preempt_disable(); 91 __tlb_flush_full(mm);
73 /*
74 * If the process only ran on the local cpu, do a local flush.
75 */
76 local_cpumask = cpumask_of_cpu(smp_processor_id());
77 if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
78 __tlb_flush_local();
79 else
80 __tlb_flush_global();
81 preempt_enable();
82} 92}
83 93
84static inline void __tlb_flush_mm_cond(struct mm_struct * mm) 94static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
diff --git a/include/asm-s390/topology.h b/include/asm-s390/topology.h
index 613aa64019da..8e97b06f298a 100644
--- a/include/asm-s390/topology.h
+++ b/include/asm-s390/topology.h
@@ -1,6 +1,29 @@
 #ifndef _ASM_S390_TOPOLOGY_H
 #define _ASM_S390_TOPOLOGY_H
 
+#include <linux/cpumask.h>
+
+#define mc_capable() (1)
+
+cpumask_t cpu_coregroup_map(unsigned int cpu);
+
+int topology_set_cpu_management(int fc);
+void topology_schedule_update(void);
+
+#define POLARIZATION_UNKNWN	(-1)
+#define POLARIZATION_HRZ	(0)
+#define POLARIZATION_VL		(1)
+#define POLARIZATION_VM		(2)
+#define POLARIZATION_VH		(3)
+
+#ifdef CONFIG_SMP
+void s390_init_cpu_topology(void);
+#else
+static inline void s390_init_cpu_topology(void)
+{
+};
+#endif
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_S390_TOPOLOGY_H */
diff --git a/include/asm-sh/semaphore-helper.h b/include/asm-sh/semaphore-helper.h
deleted file mode 100644
index bd8230c369ca..000000000000
--- a/include/asm-sh/semaphore-helper.h
+++ /dev/null
@@ -1,89 +0,0 @@
1#ifndef __ASM_SH_SEMAPHORE_HELPER_H
2#define __ASM_SH_SEMAPHORE_HELPER_H
3
4/*
5 * SMP- and interrupt-safe semaphores helper functions.
6 *
7 * (C) Copyright 1996 Linus Torvalds
8 * (C) Copyright 1999 Andrea Arcangeli
9 */
10
11/*
12 * These two _must_ execute atomically wrt each other.
13 *
14 * This is trivially done with load_locked/store_cond,
15 * which we have. Let the rest of the losers suck eggs.
16 */
17static __inline__ void wake_one_more(struct semaphore * sem)
18{
19 atomic_inc((atomic_t *)&sem->sleepers);
20}
21
22static __inline__ int waking_non_zero(struct semaphore *sem)
23{
24 unsigned long flags;
25 int ret = 0;
26
27 spin_lock_irqsave(&semaphore_wake_lock, flags);
28 if (sem->sleepers > 0) {
29 sem->sleepers--;
30 ret = 1;
31 }
32 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
33 return ret;
34}
35
36/*
37 * waking_non_zero_interruptible:
38 * 1 got the lock
39 * 0 go to sleep
40 * -EINTR interrupted
41 *
42 * We must undo the sem->count down_interruptible() increment while we are
43 * protected by the spinlock in order to make atomic this atomic_inc() with the
44 * atomic_read() in wake_one_more(), otherwise we can race. -arca
45 */
46static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
47 struct task_struct *tsk)
48{
49 unsigned long flags;
50 int ret = 0;
51
52 spin_lock_irqsave(&semaphore_wake_lock, flags);
53 if (sem->sleepers > 0) {
54 sem->sleepers--;
55 ret = 1;
56 } else if (signal_pending(tsk)) {
57 atomic_inc(&sem->count);
58 ret = -EINTR;
59 }
60 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
61 return ret;
62}
63
64/*
65 * waking_non_zero_trylock:
66 * 1 failed to lock
67 * 0 got the lock
68 *
69 * We must undo the sem->count down_trylock() increment while we are
70 * protected by the spinlock in order to make atomic this atomic_inc() with the
71 * atomic_read() in wake_one_more(), otherwise we can race. -arca
72 */
73static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
74{
75 unsigned long flags;
76 int ret = 1;
77
78 spin_lock_irqsave(&semaphore_wake_lock, flags);
79 if (sem->sleepers <= 0)
80 atomic_inc(&sem->count);
81 else {
82 sem->sleepers--;
83 ret = 0;
84 }
85 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
86 return ret;
87}
88
89#endif /* __ASM_SH_SEMAPHORE_HELPER_H */
diff --git a/include/asm-sh/semaphore.h b/include/asm-sh/semaphore.h
index 9e5a37c4dce2..d9b2034ed1d2 100644
--- a/include/asm-sh/semaphore.h
+++ b/include/asm-sh/semaphore.h
@@ -1,115 +1 @@
1#ifndef __ASM_SH_SEMAPHORE_H
+#include <linux/semaphore.h>
2#define __ASM_SH_SEMAPHORE_H
3
4#include <linux/linkage.h>
5
6#ifdef __KERNEL__
7/*
8 * SMP- and interrupt-safe semaphores.
9 *
10 * (C) Copyright 1996 Linus Torvalds
11 *
12 * SuperH version by Niibe Yutaka
13 * (Currently no asm implementation but generic C code...)
14 */
15
16#include <linux/spinlock.h>
17#include <linux/rwsem.h>
18#include <linux/wait.h>
19
20#include <asm/system.h>
21#include <asm/atomic.h>
22
23struct semaphore {
24 atomic_t count;
25 int sleepers;
26 wait_queue_head_t wait;
27};
28
29#define __SEMAPHORE_INITIALIZER(name, n) \
30{ \
31 .count = ATOMIC_INIT(n), \
32 .sleepers = 0, \
33 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
34}
35
36#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
37 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
38
39#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
40
41static inline void sema_init (struct semaphore *sem, int val)
42{
43/*
44 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
45 *
46 * i'd rather use the more flexible initialization above, but sadly
47 * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
48 */
49 atomic_set(&sem->count, val);
50 sem->sleepers = 0;
51 init_waitqueue_head(&sem->wait);
52}
53
54static inline void init_MUTEX (struct semaphore *sem)
55{
56 sema_init(sem, 1);
57}
58
59static inline void init_MUTEX_LOCKED (struct semaphore *sem)
60{
61 sema_init(sem, 0);
62}
63
64#if 0
65asmlinkage void __down_failed(void /* special register calling convention */);
66asmlinkage int __down_failed_interruptible(void /* params in registers */);
67asmlinkage int __down_failed_trylock(void /* params in registers */);
68asmlinkage void __up_wakeup(void /* special register calling convention */);
69#endif
70
71asmlinkage void __down(struct semaphore * sem);
72asmlinkage int __down_interruptible(struct semaphore * sem);
73asmlinkage int __down_trylock(struct semaphore * sem);
74asmlinkage void __up(struct semaphore * sem);
75
76extern spinlock_t semaphore_wake_lock;
77
78static inline void down(struct semaphore * sem)
79{
80 might_sleep();
81 if (atomic_dec_return(&sem->count) < 0)
82 __down(sem);
83}
84
85static inline int down_interruptible(struct semaphore * sem)
86{
87 int ret = 0;
88
89 might_sleep();
90 if (atomic_dec_return(&sem->count) < 0)
91 ret = __down_interruptible(sem);
92 return ret;
93}
94
95static inline int down_trylock(struct semaphore * sem)
96{
97 int ret = 0;
98
99 if (atomic_dec_return(&sem->count) < 0)
100 ret = __down_trylock(sem);
101 return ret;
102}
103
104/*
105 * Note! This is subtle. We jump to wake people up only if
106 * the semaphore was negative (== somebody was waiting on it).
107 */
108static inline void up(struct semaphore * sem)
109{
110 if (atomic_inc_return(&sem->count) <= 0)
111 __up(sem);
112}
113
114#endif
115#endif /* __ASM_SH_SEMAPHORE_H */
diff --git a/include/asm-sparc/semaphore.h b/include/asm-sparc/semaphore.h
index 8018f9f4d497..d9b2034ed1d2 100644
--- a/include/asm-sparc/semaphore.h
+++ b/include/asm-sparc/semaphore.h
@@ -1,192 +1 @@
1#ifndef _SPARC_SEMAPHORE_H
+#include <linux/semaphore.h>
2#define _SPARC_SEMAPHORE_H
3
4/* Dinky, good for nothing, just barely irq safe, Sparc semaphores. */
5
6#ifdef __KERNEL__
7
8#include <asm/atomic.h>
9#include <linux/wait.h>
10#include <linux/rwsem.h>
11
12struct semaphore {
13 atomic24_t count;
14 int sleepers;
15 wait_queue_head_t wait;
16};
17
18#define __SEMAPHORE_INITIALIZER(name, n) \
19{ \
20 .count = ATOMIC24_INIT(n), \
21 .sleepers = 0, \
22 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
23}
24
25#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
26 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
27
28#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
29
30static inline void sema_init (struct semaphore *sem, int val)
31{
32 atomic24_set(&sem->count, val);
33 sem->sleepers = 0;
34 init_waitqueue_head(&sem->wait);
35}
36
37static inline void init_MUTEX (struct semaphore *sem)
38{
39 sema_init(sem, 1);
40}
41
42static inline void init_MUTEX_LOCKED (struct semaphore *sem)
43{
44 sema_init(sem, 0);
45}
46
47extern void __down(struct semaphore * sem);
48extern int __down_interruptible(struct semaphore * sem);
49extern int __down_trylock(struct semaphore * sem);
50extern void __up(struct semaphore * sem);
51
52static inline void down(struct semaphore * sem)
53{
54 register volatile int *ptr asm("g1");
55 register int increment asm("g2");
56
57 might_sleep();
58
59 ptr = &(sem->count.counter);
60 increment = 1;
61
62 __asm__ __volatile__(
63 "mov %%o7, %%g4\n\t"
64 "call ___atomic24_sub\n\t"
65 " add %%o7, 8, %%o7\n\t"
66 "tst %%g2\n\t"
67 "bl 2f\n\t"
68 " nop\n"
69 "1:\n\t"
70 ".subsection 2\n"
71 "2:\n\t"
72 "save %%sp, -64, %%sp\n\t"
73 "mov %%g1, %%l1\n\t"
74 "mov %%g5, %%l5\n\t"
75 "call %3\n\t"
76 " mov %%g1, %%o0\n\t"
77 "mov %%l1, %%g1\n\t"
78 "ba 1b\n\t"
79 " restore %%l5, %%g0, %%g5\n\t"
80 ".previous\n"
81 : "=&r" (increment)
82 : "0" (increment), "r" (ptr), "i" (__down)
83 : "g3", "g4", "g7", "memory", "cc");
84}
85
86static inline int down_interruptible(struct semaphore * sem)
87{
88 register volatile int *ptr asm("g1");
89 register int increment asm("g2");
90
91 might_sleep();
92
93 ptr = &(sem->count.counter);
94 increment = 1;
95
96 __asm__ __volatile__(
97 "mov %%o7, %%g4\n\t"
98 "call ___atomic24_sub\n\t"
99 " add %%o7, 8, %%o7\n\t"
100 "tst %%g2\n\t"
101 "bl 2f\n\t"
102 " clr %%g2\n"
103 "1:\n\t"
104 ".subsection 2\n"
105 "2:\n\t"
106 "save %%sp, -64, %%sp\n\t"
107 "mov %%g1, %%l1\n\t"
108 "mov %%g5, %%l5\n\t"
109 "call %3\n\t"
110 " mov %%g1, %%o0\n\t"
111 "mov %%l1, %%g1\n\t"
112 "mov %%l5, %%g5\n\t"
113 "ba 1b\n\t"
114 " restore %%o0, %%g0, %%g2\n\t"
115 ".previous\n"
116 : "=&r" (increment)
117 : "0" (increment), "r" (ptr), "i" (__down_interruptible)
118 : "g3", "g4", "g7", "memory", "cc");
119
120 return increment;
121}
122
123static inline int down_trylock(struct semaphore * sem)
124{
125 register volatile int *ptr asm("g1");
126 register int increment asm("g2");
127
128 ptr = &(sem->count.counter);
129 increment = 1;
130
131 __asm__ __volatile__(
132 "mov %%o7, %%g4\n\t"
133 "call ___atomic24_sub\n\t"
134 " add %%o7, 8, %%o7\n\t"
135 "tst %%g2\n\t"
136 "bl 2f\n\t"
137 " clr %%g2\n"
138 "1:\n\t"
139 ".subsection 2\n"
140 "2:\n\t"
141 "save %%sp, -64, %%sp\n\t"
142 "mov %%g1, %%l1\n\t"
143 "mov %%g5, %%l5\n\t"
144 "call %3\n\t"
145 " mov %%g1, %%o0\n\t"
146 "mov %%l1, %%g1\n\t"
147 "mov %%l5, %%g5\n\t"
148 "ba 1b\n\t"
149 " restore %%o0, %%g0, %%g2\n\t"
150 ".previous\n"
151 : "=&r" (increment)
152 : "0" (increment), "r" (ptr), "i" (__down_trylock)
153 : "g3", "g4", "g7", "memory", "cc");
154
155 return increment;
156}
157
158static inline void up(struct semaphore * sem)
159{
160 register volatile int *ptr asm("g1");
161 register int increment asm("g2");
162
163 ptr = &(sem->count.counter);
164 increment = 1;
165
166 __asm__ __volatile__(
167 "mov %%o7, %%g4\n\t"
168 "call ___atomic24_add\n\t"
169 " add %%o7, 8, %%o7\n\t"
170 "tst %%g2\n\t"
171 "ble 2f\n\t"
172 " nop\n"
173 "1:\n\t"
174 ".subsection 2\n"
175 "2:\n\t"
176 "save %%sp, -64, %%sp\n\t"
177 "mov %%g1, %%l1\n\t"
178 "mov %%g5, %%l5\n\t"
179 "call %3\n\t"
180 " mov %%g1, %%o0\n\t"
181 "mov %%l1, %%g1\n\t"
182 "ba 1b\n\t"
183 " restore %%l5, %%g0, %%g5\n\t"
184 ".previous\n"
185 : "=&r" (increment)
186 : "0" (increment), "r" (ptr), "i" (__up)
187 : "g3", "g4", "g7", "memory", "cc");
188}
189
190#endif /* __KERNEL__ */
191
192#endif /* !(_SPARC_SEMAPHORE_H) */
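
The sparc inline asm above hides one more convention: %g2 carries the decremented count into the out-of-line stub and carries the slow path's return value (0 or -EINTR) back out, which is why the fast path clears it in the branch delay slot. Stripped of the register-window bookkeeping, down_interruptible() reduces to roughly this — a user-space sketch with invented names:

#include <stdatomic.h>

struct sem_model {
	atomic_int count;
};

int slow_down_interruptible(struct sem_model *sem);	/* cf. __down_interruptible */

static inline int down_interruptible_model(struct sem_model *sem)
{
	if (atomic_fetch_sub_explicit(&sem->count, 1,
				      memory_order_acquire) - 1 < 0)
		return slow_down_interruptible(sem);	/* 0 or -EINTR */
	return 0;	/* fast path, cf. the " clr %%g2" delay slot */
}
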
diff --git a/include/asm-sparc64/semaphore.h b/include/asm-sparc64/semaphore.h
index 7f7c0c4e024f..d9b2034ed1d2 100644
--- a/include/asm-sparc64/semaphore.h
+++ b/include/asm-sparc64/semaphore.h
@@ -1,53 +1 @@
1#ifndef _SPARC64_SEMAPHORE_H
+#include <linux/semaphore.h>
2#define _SPARC64_SEMAPHORE_H
3
4/* These are actually reasonable on the V9.
5 *
6 * See asm-ppc/semaphore.h for implementation commentary,
7 * only sparc64 specific issues are commented here.
8 */
9#ifdef __KERNEL__
10
11#include <asm/atomic.h>
12#include <asm/system.h>
13#include <linux/wait.h>
14#include <linux/rwsem.h>
15
16struct semaphore {
17 atomic_t count;
18 wait_queue_head_t wait;
19};
20
21#define __SEMAPHORE_INITIALIZER(name, count) \
22 { ATOMIC_INIT(count), \
23 __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
24
25#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
26 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
27
28#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
29
30static inline void sema_init (struct semaphore *sem, int val)
31{
32 atomic_set(&sem->count, val);
33 init_waitqueue_head(&sem->wait);
34}
35
36static inline void init_MUTEX (struct semaphore *sem)
37{
38 sema_init(sem, 1);
39}
40
41static inline void init_MUTEX_LOCKED (struct semaphore *sem)
42{
43 sema_init(sem, 0);
44}
45
46extern void up(struct semaphore *sem);
47extern void down(struct semaphore *sem);
48extern int down_trylock(struct semaphore *sem);
49extern int down_interruptible(struct semaphore *sem);
50
51#endif /* __KERNEL__ */
52
53#endif /* !(_SPARC64_SEMAPHORE_H) */
diff --git a/include/asm-um/semaphore.h b/include/asm-um/semaphore.h
index ff13c34de421..d9b2034ed1d2 100644
--- a/include/asm-um/semaphore.h
+++ b/include/asm-um/semaphore.h
@@ -1,6 +1 @@
1#ifndef __UM_SEMAPHORE_H
+#include <linux/semaphore.h>
2#define __UM_SEMAPHORE_H
3
4#include "asm/arch/semaphore.h"
5
6#endif
diff --git a/include/asm-v850/semaphore.h b/include/asm-v850/semaphore.h
index 10ed0ccf37df..d9b2034ed1d2 100644
--- a/include/asm-v850/semaphore.h
+++ b/include/asm-v850/semaphore.h
@@ -1,84 +1 @@
1#ifndef __V850_SEMAPHORE_H__
+#include <linux/semaphore.h>
2#define __V850_SEMAPHORE_H__
3
4#include <linux/linkage.h>
5#include <linux/spinlock.h>
6#include <linux/wait.h>
7#include <linux/rwsem.h>
8
9#include <asm/atomic.h>
10
11struct semaphore {
12 atomic_t count;
13 int sleepers;
14 wait_queue_head_t wait;
15};
16
17#define __SEMAPHORE_INITIALIZER(name,count) \
18 { ATOMIC_INIT (count), 0, \
19 __WAIT_QUEUE_HEAD_INITIALIZER ((name).wait) }
20
21#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
22 struct semaphore name = __SEMAPHORE_INITIALIZER (name,count)
23
24#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC (name,1)
25
26static inline void sema_init (struct semaphore *sem, int val)
27{
28 *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
29}
30
31static inline void init_MUTEX (struct semaphore *sem)
32{
33 sema_init (sem, 1);
34}
35
36static inline void init_MUTEX_LOCKED (struct semaphore *sem)
37{
38 sema_init (sem, 0);
39}
40
41/*
42 * special register calling convention
43 */
44asmlinkage void __down_failed (void);
45asmlinkage int __down_interruptible_failed (void);
46asmlinkage int __down_trylock_failed (void);
47asmlinkage void __up_wakeup (void);
48
49extern void __down (struct semaphore * sem);
50extern int __down_interruptible (struct semaphore * sem);
51extern int __down_trylock (struct semaphore * sem);
52extern void __up (struct semaphore * sem);
53
54static inline void down (struct semaphore * sem)
55{
56 might_sleep();
57 if (atomic_dec_return (&sem->count) < 0)
58 __down (sem);
59}
60
61static inline int down_interruptible (struct semaphore * sem)
62{
63 int ret = 0;
64 might_sleep();
65 if (atomic_dec_return (&sem->count) < 0)
66 ret = __down_interruptible (sem);
67 return ret;
68}
69
70static inline int down_trylock (struct semaphore *sem)
71{
72 int ret = 0;
73 if (atomic_dec_return (&sem->count) < 0)
74 ret = __down_trylock (sem);
75 return ret;
76}
77
78static inline void up (struct semaphore * sem)
79{
80 if (atomic_inc_return (&sem->count) <= 0)
81 __up (sem);
82}
83
84#endif /* __V850_SEMAPHORE_H__ */
diff --git a/include/asm-x86/semaphore.h b/include/asm-x86/semaphore.h
index 572c0b67a6b0..d9b2034ed1d2 100644
--- a/include/asm-x86/semaphore.h
+++ b/include/asm-x86/semaphore.h
@@ -1,5 +1 @@
1#ifdef CONFIG_X86_32
+#include <linux/semaphore.h>
2# include "semaphore_32.h"
3#else
4# include "semaphore_64.h"
5#endif
diff --git a/include/asm-x86/semaphore_32.h b/include/asm-x86/semaphore_32.h
deleted file mode 100644
index ac96d3804d0c..000000000000
--- a/include/asm-x86/semaphore_32.h
+++ /dev/null
@@ -1,175 +0,0 @@
1#ifndef _I386_SEMAPHORE_H
2#define _I386_SEMAPHORE_H
3
4#include <linux/linkage.h>
5
6#ifdef __KERNEL__
7
8/*
9 * SMP- and interrupt-safe semaphores..
10 *
11 * (C) Copyright 1996 Linus Torvalds
12 *
13 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
14 * the original code and to make semaphore waits
15 * interruptible so that processes waiting on
16 * semaphores can be killed.
17 * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
18 * functions in asm/semaphore-helper.h while fixing a
19 * potential and subtle race discovered by Ulrich Schmid
20 * in down_interruptible(). Since I started to play here I
21 * also implemented the `trylock' semaphore operation.
22 * 1999-07-02 Artur Skawina <skawina@geocities.com>
23 * Optimized "0(ecx)" -> "(ecx)" (the assembler does not
24 * do this). Changed calling sequences from push/jmp to
25 * traditional call/ret.
26 * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
27 * Some hacks to ensure compatibility with recent
28 * GCC snapshots, to avoid stack corruption when compiling
29 * with -fomit-frame-pointer. It is unclear whether this will
30 * be fixed in GCC, as our previous implementation was a
31 * bit dubious.
32 *
33 * If you would like to see an analysis of this implementation, please
34 * ftp to gcom.com and download the file
35 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
36 *
37 */
38
39#include <asm/system.h>
40#include <asm/atomic.h>
41#include <linux/wait.h>
42#include <linux/rwsem.h>
43
44struct semaphore {
45 atomic_t count;
46 int sleepers;
47 wait_queue_head_t wait;
48};
49
50
51#define __SEMAPHORE_INITIALIZER(name, n) \
52{ \
53 .count = ATOMIC_INIT(n), \
54 .sleepers = 0, \
55 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
56}
57
58#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
59 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
60
61#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
62
63static inline void sema_init (struct semaphore *sem, int val)
64{
65/*
66 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
67 *
68 * i'd rather use the more flexible initialization above, but sadly
69 * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
70 */
71 atomic_set(&sem->count, val);
72 sem->sleepers = 0;
73 init_waitqueue_head(&sem->wait);
74}
75
76static inline void init_MUTEX (struct semaphore *sem)
77{
78 sema_init(sem, 1);
79}
80
81static inline void init_MUTEX_LOCKED (struct semaphore *sem)
82{
83 sema_init(sem, 0);
84}
85
86extern asmregparm void __down_failed(atomic_t *count_ptr);
87extern asmregparm int __down_failed_interruptible(atomic_t *count_ptr);
88extern asmregparm int __down_failed_trylock(atomic_t *count_ptr);
89extern asmregparm void __up_wakeup(atomic_t *count_ptr);
90
91/*
92 * This is ugly, but we want the default case to fall through.
93 * "__down_failed" is a special asm handler that calls the C
94 * routine that actually waits. See arch/i386/kernel/semaphore.c
95 */
96static inline void down(struct semaphore * sem)
97{
98 might_sleep();
99 __asm__ __volatile__(
100 "# atomic down operation\n\t"
101 LOCK_PREFIX "decl %0\n\t" /* --sem->count */
102 "jns 2f\n"
103 "\tlea %0,%%eax\n\t"
104 "call __down_failed\n"
105 "2:"
106 :"+m" (sem->count)
107 :
108 :"memory","ax");
109}
110
111/*
112 * Interruptible try to acquire a semaphore. If we obtained
113 * it, return zero. If we were interrupted, returns -EINTR
114 */
115static inline int down_interruptible(struct semaphore * sem)
116{
117 int result;
118
119 might_sleep();
120 __asm__ __volatile__(
121 "# atomic interruptible down operation\n\t"
122 "xorl %0,%0\n\t"
123 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
124 "jns 2f\n\t"
125 "lea %1,%%eax\n\t"
126 "call __down_failed_interruptible\n"
127 "2:"
128 :"=&a" (result), "+m" (sem->count)
129 :
130 :"memory");
131 return result;
132}
133
134/*
135 * Non-blockingly attempt to down() a semaphore.
136 * Returns zero if we acquired it
137 */
138static inline int down_trylock(struct semaphore * sem)
139{
140 int result;
141
142 __asm__ __volatile__(
143 "# atomic interruptible down operation\n\t"
144 "xorl %0,%0\n\t"
145 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
146 "jns 2f\n\t"
147 "lea %1,%%eax\n\t"
148 "call __down_failed_trylock\n\t"
149 "2:\n"
150 :"=&a" (result), "+m" (sem->count)
151 :
152 :"memory");
153 return result;
154}
155
156/*
157 * Note! This is subtle. We jump to wake people up only if
158 * the semaphore was negative (== somebody was waiting on it).
159 */
160static inline void up(struct semaphore * sem)
161{
162 __asm__ __volatile__(
163 "# atomic up operation\n\t"
164 LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
165 "jg 1f\n\t"
166 "lea %0,%%eax\n\t"
167 "call __up_wakeup\n"
168 "1:"
169 :"+m" (sem->count)
170 :
171 :"memory","ax");
172}
173
174#endif
175#endif
diff --git a/include/asm-x86/semaphore_64.h b/include/asm-x86/semaphore_64.h
deleted file mode 100644
index 79694306bf7d..000000000000
--- a/include/asm-x86/semaphore_64.h
+++ /dev/null
@@ -1,180 +0,0 @@
1#ifndef _X86_64_SEMAPHORE_H
2#define _X86_64_SEMAPHORE_H
3
4#include <linux/linkage.h>
5
6#ifdef __KERNEL__
7
8/*
9 * SMP- and interrupt-safe semaphores..
10 *
11 * (C) Copyright 1996 Linus Torvalds
12 *
13 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
14 * the original code and to make semaphore waits
15 * interruptible so that processes waiting on
16 * semaphores can be killed.
17 * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
18 * functions in asm/semaphore-helper.h while fixing a
19 * potential and subtle race discovered by Ulrich Schmid
20 * in down_interruptible(). Since I started to play here I
21 * also implemented the `trylock' semaphore operation.
22 * 1999-07-02 Artur Skawina <skawina@geocities.com>
23 * Optimized "0(ecx)" -> "(ecx)" (the assembler does not
24 * do this). Changed calling sequences from push/jmp to
25 * traditional call/ret.
26 * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
27 * Some hacks to ensure compatibility with recent
28 * GCC snapshots, to avoid stack corruption when compiling
29 * with -fomit-frame-pointer. It is unclear whether this will
30 * be fixed in GCC, as our previous implementation was a
31 * bit dubious.
32 *
33 * If you would like to see an analysis of this implementation, please
34 * ftp to gcom.com and download the file
35 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
36 *
37 */
38
39#include <asm/system.h>
40#include <asm/atomic.h>
41#include <asm/rwlock.h>
42#include <linux/wait.h>
43#include <linux/rwsem.h>
44#include <linux/stringify.h>
45
46struct semaphore {
47 atomic_t count;
48 int sleepers;
49 wait_queue_head_t wait;
50};
51
52#define __SEMAPHORE_INITIALIZER(name, n) \
53{ \
54 .count = ATOMIC_INIT(n), \
55 .sleepers = 0, \
56 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
57}
58
59#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
60 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
61
62#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
63
64static inline void sema_init (struct semaphore *sem, int val)
65{
66/*
67 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
68 *
69 * i'd rather use the more flexible initialization above, but sadly
70 * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
71 */
72 atomic_set(&sem->count, val);
73 sem->sleepers = 0;
74 init_waitqueue_head(&sem->wait);
75}
76
77static inline void init_MUTEX (struct semaphore *sem)
78{
79 sema_init(sem, 1);
80}
81
82static inline void init_MUTEX_LOCKED (struct semaphore *sem)
83{
84 sema_init(sem, 0);
85}
86
87asmlinkage void __down_failed(void /* special register calling convention */);
88asmlinkage int __down_failed_interruptible(void /* params in registers */);
89asmlinkage int __down_failed_trylock(void /* params in registers */);
90asmlinkage void __up_wakeup(void /* special register calling convention */);
91
92asmlinkage void __down(struct semaphore * sem);
93asmlinkage int __down_interruptible(struct semaphore * sem);
94asmlinkage int __down_trylock(struct semaphore * sem);
95asmlinkage void __up(struct semaphore * sem);
96
97/*
98 * This is ugly, but we want the default case to fall through.
99 * "__down_failed" is a special asm handler that calls the C
100 * routine that actually waits. See arch/x86_64/kernel/semaphore.c
101 */
102static inline void down(struct semaphore * sem)
103{
104 might_sleep();
105
106 __asm__ __volatile__(
107 "# atomic down operation\n\t"
108 LOCK_PREFIX "decl %0\n\t" /* --sem->count */
109 "jns 1f\n\t"
110 "call __down_failed\n"
111 "1:"
112 :"=m" (sem->count)
113 :"D" (sem)
114 :"memory");
115}
116
117/*
118 * Interruptible try to acquire a semaphore. If we obtained
119 * it, return zero. If we were interrupted, returns -EINTR
120 */
121static inline int down_interruptible(struct semaphore * sem)
122{
123 int result;
124
125 might_sleep();
126
127 __asm__ __volatile__(
128 "# atomic interruptible down operation\n\t"
129 "xorl %0,%0\n\t"
130 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
131 "jns 2f\n\t"
132 "call __down_failed_interruptible\n"
133 "2:\n"
134 :"=&a" (result), "=m" (sem->count)
135 :"D" (sem)
136 :"memory");
137 return result;
138}
139
140/*
141 * Non-blockingly attempt to down() a semaphore.
142 * Returns zero if we acquired it
143 */
144static inline int down_trylock(struct semaphore * sem)
145{
146 int result;
147
148 __asm__ __volatile__(
149 "# atomic interruptible down operation\n\t"
150 "xorl %0,%0\n\t"
151 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
152 "jns 2f\n\t"
153 "call __down_failed_trylock\n\t"
154 "2:\n"
155 :"=&a" (result), "=m" (sem->count)
156 :"D" (sem)
157 :"memory","cc");
158 return result;
159}
160
161/*
162 * Note! This is subtle. We jump to wake people up only if
163 * the semaphore was negative (== somebody was waiting on it).
164 * The default case (no contention) will result in NO
165 * jumps for both down() and up().
166 */
167static inline void up(struct semaphore * sem)
168{
169 __asm__ __volatile__(
170 "# atomic up operation\n\t"
171 LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
172 "jg 1f\n\t"
173 "call __up_wakeup\n"
174 "1:"
175 :"=m" (sem->count)
176 :"D" (sem)
177 :"memory");
178}
179#endif /* __KERNEL__ */
180#endif
diff --git a/include/asm-xtensa/semaphore.h b/include/asm-xtensa/semaphore.h
index 3e04167cd9dc..d9b2034ed1d2 100644
--- a/include/asm-xtensa/semaphore.h
+++ b/include/asm-xtensa/semaphore.h
@@ -1,99 +1 @@
1/* 1#include <linux/semaphore.h>
2 * linux/include/asm-xtensa/semaphore.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc.
9 */
10
11#ifndef _XTENSA_SEMAPHORE_H
12#define _XTENSA_SEMAPHORE_H
13
14#include <asm/atomic.h>
15#include <asm/system.h>
16#include <linux/wait.h>
17#include <linux/rwsem.h>
18
19struct semaphore {
20 atomic_t count;
21 int sleepers;
22 wait_queue_head_t wait;
23};
24
25#define __SEMAPHORE_INITIALIZER(name,n) \
26{ \
27 .count = ATOMIC_INIT(n), \
28 .sleepers = 0, \
29 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
30}
31
32#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
33 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
34
35#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
36
37static inline void sema_init (struct semaphore *sem, int val)
38{
39 atomic_set(&sem->count, val);
40 sem->sleepers = 0;
41 init_waitqueue_head(&sem->wait);
42}
43
44static inline void init_MUTEX (struct semaphore *sem)
45{
46 sema_init(sem, 1);
47}
48
49static inline void init_MUTEX_LOCKED (struct semaphore *sem)
50{
51 sema_init(sem, 0);
52}
53
54asmlinkage void __down(struct semaphore * sem);
55asmlinkage int __down_interruptible(struct semaphore * sem);
56asmlinkage int __down_trylock(struct semaphore * sem);
57asmlinkage void __up(struct semaphore * sem);
58
59extern spinlock_t semaphore_wake_lock;
60
61static inline void down(struct semaphore * sem)
62{
63 might_sleep();
64
65 if (atomic_sub_return(1, &sem->count) < 0)
66 __down(sem);
67}
68
69static inline int down_interruptible(struct semaphore * sem)
70{
71 int ret = 0;
72
73 might_sleep();
74
75 if (atomic_sub_return(1, &sem->count) < 0)
76 ret = __down_interruptible(sem);
77 return ret;
78}
79
80static inline int down_trylock(struct semaphore * sem)
81{
82 int ret = 0;
83
84 if (atomic_sub_return(1, &sem->count) < 0)
85 ret = __down_trylock(sem);
86 return ret;
87}
88
89/*
90 * Note! This is subtle. We jump to wake people up only if
91 * the semaphore was negative (== somebody was waiting on it).
92 */
93static inline void up(struct semaphore * sem)
94{
95 if (atomic_add_return(1, &sem->count) <= 0)
96 __up(sem);
97}
98
99#endif /* _XTENSA_SEMAPHORE_H */
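
All of the per-arch headers removed in this series share the same fast path: down() decrements the count and enters the __down() slow path only when the result goes negative, while up() increments it and calls __up() only when the result is still non-positive, i.e. somebody was waiting. A minimal user-space sketch of that invariant, using C11 atomics instead of the kernel's atomic_t (the slow-path stubs are placeholders, not the kernel implementation):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int count = 1;            /* mutex-style semaphore */

static void __down_slow(void) { puts("contended: would sleep"); }
static void __up_slow(void)   { puts("contended: would wake a waiter"); }

static void down_fast(void)
{
        /* atomic_fetch_sub returns the old value; old <= 0 means the
         * new value went negative, matching atomic_dec_return() < 0. */
        if (atomic_fetch_sub(&count, 1) <= 0)
                __down_slow();
}

static void up_fast(void)
{
        /* old < 0 means the new value is still <= 0: waiters exist,
         * matching atomic_inc_return() <= 0. */
        if (atomic_fetch_add(&count, 1) < 0)
                __up_slow();
}

int main(void)
{
        down_fast();    /* 1 -> 0: uncontended acquire */
        down_fast();    /* 0 -> -1: slow path fires */
        up_fast();      /* -1 -> 0: wake path fires */
        return 0;
}
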
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index f8ab4ce70564..b5fef13148bd 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -102,6 +102,25 @@ extern void disable_irq_nosync(unsigned int irq);
102extern void disable_irq(unsigned int irq); 102extern void disable_irq(unsigned int irq);
103extern void enable_irq(unsigned int irq); 103extern void enable_irq(unsigned int irq);
104 104
105#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
106
107extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
108extern int irq_can_set_affinity(unsigned int irq);
109
110#else /* CONFIG_SMP */
111
112static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
113{
114 return -EINVAL;
115}
116
117static inline int irq_can_set_affinity(unsigned int irq)
118{
119 return 0;
120}
121
122#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
123
105#ifdef CONFIG_GENERIC_HARDIRQS 124#ifdef CONFIG_GENERIC_HARDIRQS
106/* 125/*
107 * Special lockdep variants of irq disabling/enabling. 126 * Special lockdep variants of irq disabling/enabling.
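
With the prototypes moved into <linux/interrupt.h> and given !SMP stubs, drivers can call the affinity helpers unconditionally. A hedged sketch of a caller; pin_irq_to_cpu0() and the choice of CPU 0 are invented for illustration:

#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

/* Hypothetical helper: bind an interrupt to CPU 0 where supported.
 * On !SMP kernels irq_can_set_affinity() is the stub added above and
 * returns 0, so this cleanly degrades to -ENOSYS. */
static int pin_irq_to_cpu0(unsigned int irq)
{
        if (!irq_can_set_affinity(irq))
                return -ENOSYS;
        return irq_set_affinity(irq, cpumask_of_cpu(0));
}
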
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 176e5e790a44..1883a85625dd 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -228,21 +228,11 @@ static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
228 228
229#endif /* CONFIG_GENERIC_PENDING_IRQ */ 229#endif /* CONFIG_GENERIC_PENDING_IRQ */
230 230
231extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
232extern int irq_can_set_affinity(unsigned int irq);
233
234#else /* CONFIG_SMP */ 231#else /* CONFIG_SMP */
235 232
236#define move_native_irq(x) 233#define move_native_irq(x)
237#define move_masked_irq(x) 234#define move_masked_irq(x)
238 235
239static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
240{
241 return -EINVAL;
242}
243
244static inline int irq_can_set_affinity(unsigned int irq) { return 0; }
245
246#endif /* CONFIG_SMP */ 236#endif /* CONFIG_SMP */
247 237
248#ifdef CONFIG_IRQBALANCE 238#ifdef CONFIG_IRQBALANCE
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 7d1eaa97de13..77323a72dd3c 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -81,7 +81,7 @@ enum {
81 MLX4_CMD_SW2HW_CQ = 0x16, 81 MLX4_CMD_SW2HW_CQ = 0x16,
82 MLX4_CMD_HW2SW_CQ = 0x17, 82 MLX4_CMD_HW2SW_CQ = 0x17,
83 MLX4_CMD_QUERY_CQ = 0x18, 83 MLX4_CMD_QUERY_CQ = 0x18,
84 MLX4_CMD_RESIZE_CQ = 0x2c, 84 MLX4_CMD_MODIFY_CQ = 0x2c,
85 85
86 /* SRQ commands */ 86 /* SRQ commands */
87 MLX4_CMD_SW2HW_SRQ = 0x35, 87 MLX4_CMD_SW2HW_SRQ = 0x35,
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index 0181e0a57cbf..071cf96cf01f 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -45,11 +45,11 @@ struct mlx4_cqe {
45 u8 sl; 45 u8 sl;
46 u8 reserved1; 46 u8 reserved1;
47 __be16 rlid; 47 __be16 rlid;
48 u32 reserved2; 48 __be32 ipoib_status;
49 __be32 byte_cnt; 49 __be32 byte_cnt;
50 __be16 wqe_index; 50 __be16 wqe_index;
51 __be16 checksum; 51 __be16 checksum;
52 u8 reserved3[3]; 52 u8 reserved2[3];
53 u8 owner_sr_opcode; 53 u8 owner_sr_opcode;
54}; 54};
55 55
@@ -85,6 +85,16 @@ enum {
85 MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22, 85 MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22,
86}; 86};
87 87
88enum {
89 MLX4_CQE_IPOIB_STATUS_IPV4 = 1 << 22,
90 MLX4_CQE_IPOIB_STATUS_IPV4F = 1 << 23,
91 MLX4_CQE_IPOIB_STATUS_IPV6 = 1 << 24,
92 MLX4_CQE_IPOIB_STATUS_IPV4OPT = 1 << 25,
93 MLX4_CQE_IPOIB_STATUS_TCP = 1 << 26,
94 MLX4_CQE_IPOIB_STATUS_UDP = 1 << 27,
95 MLX4_CQE_IPOIB_STATUS_IPOK = 1 << 28,
96};
97
88static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd, 98static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd,
89 void __iomem *uar_page, 99 void __iomem *uar_page,
90 spinlock_t *doorbell_lock) 100 spinlock_t *doorbell_lock)
@@ -120,4 +130,9 @@ enum {
120 MLX4_CQ_DB_REQ_NOT = 2 << 24 130 MLX4_CQ_DB_REQ_NOT = 2 << 24
121}; 131};
122 132
133int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
134 u16 count, u16 period);
135int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
136 int entries, struct mlx4_mtt *mtt);
137
123#endif /* MLX4_CQ_H */ 138#endif /* MLX4_CQ_H */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6cdf813cd478..ff7df1a2222f 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -186,6 +186,7 @@ struct mlx4_caps {
186 u32 flags; 186 u32 flags;
187 u16 stat_rate_support; 187 u16 stat_rate_support;
188 u8 port_width_cap[MLX4_MAX_PORTS + 1]; 188 u8 port_width_cap[MLX4_MAX_PORTS + 1];
189 int max_gso_sz;
189}; 190};
190 191
191struct mlx4_buf_list { 192struct mlx4_buf_list {
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 1b835ca49df1..53c5fdb6eac4 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -48,8 +48,7 @@ struct mlx4_interface {
48 void * (*add) (struct mlx4_dev *dev); 48 void * (*add) (struct mlx4_dev *dev);
49 void (*remove)(struct mlx4_dev *dev, void *context); 49 void (*remove)(struct mlx4_dev *dev, void *context);
50 void (*event) (struct mlx4_dev *dev, void *context, 50 void (*event) (struct mlx4_dev *dev, void *context,
51 enum mlx4_dev_event event, int subtype, 51 enum mlx4_dev_event event, int port);
52 int port);
53 struct list_head list; 52 struct list_head list;
54}; 53};
55 54
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 09a2230923f2..a5e43febee4f 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -158,10 +158,12 @@ struct mlx4_qp_context {
158#define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232) 158#define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232)
159 159
160enum { 160enum {
161 MLX4_WQE_CTRL_NEC = 1 << 29, 161 MLX4_WQE_CTRL_NEC = 1 << 29,
162 MLX4_WQE_CTRL_FENCE = 1 << 6, 162 MLX4_WQE_CTRL_FENCE = 1 << 6,
163 MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2, 163 MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2,
164 MLX4_WQE_CTRL_SOLICITED = 1 << 1, 164 MLX4_WQE_CTRL_SOLICITED = 1 << 1,
165 MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
166 MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
165}; 167};
166 168
167struct mlx4_wqe_ctrl_seg { 169struct mlx4_wqe_ctrl_seg {
@@ -217,6 +219,11 @@ struct mlx4_wqe_datagram_seg {
217 __be32 reservd[2]; 219 __be32 reservd[2];
218}; 220};
219 221
222struct mlx4_lso_seg {
223 __be32 mss_hdr_size;
224 __be32 header[0];
225};
226
220struct mlx4_wqe_bind_seg { 227struct mlx4_wqe_bind_seg {
221 __be32 flags1; 228 __be32 flags1;
222 __be32 flags2; 229 __be32 flags2;
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 6e0393a5b2ea..eb560d031acd 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -160,14 +160,18 @@ enum {
160 160
161 161
162#ifdef __KERNEL__ 162#ifdef __KERNEL__
163#include <linux/spinlock.h> 163#include <linux/list.h>
164#include <linux/rwsem.h>
165#include <linux/mutex.h> 164#include <linux/mutex.h>
165#include <linux/rwsem.h>
166#include <linux/spinlock.h>
167#include <linux/wait.h>
166 168
167#include <linux/dqblk_xfs.h> 169#include <linux/dqblk_xfs.h>
168#include <linux/dqblk_v1.h> 170#include <linux/dqblk_v1.h>
169#include <linux/dqblk_v2.h> 171#include <linux/dqblk_v2.h>
170 172
173#include <asm/atomic.h>
174
171extern spinlock_t dq_data_lock; 175extern spinlock_t dq_data_lock;
172 176
173/* Maximal numbers of writes for quota operation (insert/delete/update) 177/* Maximal numbers of writes for quota operation (insert/delete/update)
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
new file mode 100644
index 000000000000..9cae64b00d6b
--- /dev/null
+++ b/include/linux/semaphore.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright (c) 2008 Intel Corporation
3 * Author: Matthew Wilcox <willy@linux.intel.com>
4 *
5 * Distributed under the terms of the GNU GPL, version 2
6 *
7 * Please see kernel/semaphore.c for documentation of these functions
8 */
9#ifndef __LINUX_SEMAPHORE_H
10#define __LINUX_SEMAPHORE_H
11
12#include <linux/list.h>
13#include <linux/spinlock.h>
14
15/* Please don't access any members of this structure directly */
16struct semaphore {
17 spinlock_t lock;
18 unsigned int count;
19 struct list_head wait_list;
20};
21
22#define __SEMAPHORE_INITIALIZER(name, n) \
23{ \
24 .lock = __SPIN_LOCK_UNLOCKED((name).lock), \
25 .count = n, \
26 .wait_list = LIST_HEAD_INIT((name).wait_list), \
27}
28
29#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
30 struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
31
32#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
33
34static inline void sema_init(struct semaphore *sem, int val)
35{
36 static struct lock_class_key __key;
37 *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
38 lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);
39}
40
41#define init_MUTEX(sem) sema_init(sem, 1)
42#define init_MUTEX_LOCKED(sem) sema_init(sem, 0)
43
44extern void down(struct semaphore *sem);
45extern int __must_check down_interruptible(struct semaphore *sem);
46extern int __must_check down_killable(struct semaphore *sem);
47extern int __must_check down_trylock(struct semaphore *sem);
48extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
49extern void up(struct semaphore *sem);
50
51#endif /* __LINUX_SEMAPHORE_H */
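
Typical use of the generic API declared above, for reference; the function name and error handling are illustrative, not taken from the patch:

#include <linux/semaphore.h>

static DECLARE_MUTEX(demo_sem);         /* counting semaphore, count = 1 */

static int demo_do_work(void)
{
        int ret;

        /* Sleep until the semaphore is free, or a signal arrives. */
        ret = down_interruptible(&demo_sem);
        if (ret)
                return ret;             /* -EINTR */
        /* ... critical section ... */
        up(&demo_sem);
        return 0;
}
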
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index b00c1c73eb0a..79d59c937fac 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -45,9 +45,9 @@ struct kmem_cache_cpu {
45struct kmem_cache_node { 45struct kmem_cache_node {
46 spinlock_t list_lock; /* Protect partial list and nr_partial */ 46 spinlock_t list_lock; /* Protect partial list and nr_partial */
47 unsigned long nr_partial; 47 unsigned long nr_partial;
48 atomic_long_t nr_slabs;
49 struct list_head partial; 48 struct list_head partial;
50#ifdef CONFIG_SLUB_DEBUG 49#ifdef CONFIG_SLUB_DEBUG
50 atomic_long_t nr_slabs;
51 struct list_head full; 51 struct list_head full;
52#endif 52#endif
53}; 53};
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
index 64a721fcbc1c..8d65bf0a625b 100644
--- a/include/rdma/ib_user_verbs.h
+++ b/include/rdma/ib_user_verbs.h
@@ -533,7 +533,10 @@ struct ib_uverbs_send_wr {
533 __u32 num_sge; 533 __u32 num_sge;
534 __u32 opcode; 534 __u32 opcode;
535 __u32 send_flags; 535 __u32 send_flags;
536 __u32 imm_data; 536 union {
537 __u32 imm_data;
538 __u32 invalidate_rkey;
539 } ex;
537 union { 540 union {
538 struct { 541 struct {
539 __u64 remote_addr; 542 __u64 remote_addr;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 701e7b40560a..95bf4bac44cb 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -94,7 +94,7 @@ enum ib_device_cap_flags {
94 IB_DEVICE_SRQ_RESIZE = (1<<13), 94 IB_DEVICE_SRQ_RESIZE = (1<<13),
95 IB_DEVICE_N_NOTIFY_CQ = (1<<14), 95 IB_DEVICE_N_NOTIFY_CQ = (1<<14),
96 IB_DEVICE_ZERO_STAG = (1<<15), 96 IB_DEVICE_ZERO_STAG = (1<<15),
97 IB_DEVICE_SEND_W_INV = (1<<16), 97 IB_DEVICE_RESERVED = (1<<16), /* old SEND_W_INV */
98 IB_DEVICE_MEM_WINDOW = (1<<17), 98 IB_DEVICE_MEM_WINDOW = (1<<17),
99 /* 99 /*
100 * Devices should set IB_DEVICE_UD_IP_SUM if they support 100 * Devices should set IB_DEVICE_UD_IP_SUM if they support
@@ -104,6 +104,8 @@ enum ib_device_cap_flags {
104 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. 104 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
105 */ 105 */
106 IB_DEVICE_UD_IP_CSUM = (1<<18), 106 IB_DEVICE_UD_IP_CSUM = (1<<18),
107 IB_DEVICE_UD_TSO = (1<<19),
108 IB_DEVICE_SEND_W_INV = (1<<21),
107}; 109};
108 110
109enum ib_atomic_cap { 111enum ib_atomic_cap {
@@ -411,6 +413,7 @@ enum ib_wc_opcode {
411 IB_WC_COMP_SWAP, 413 IB_WC_COMP_SWAP,
412 IB_WC_FETCH_ADD, 414 IB_WC_FETCH_ADD,
413 IB_WC_BIND_MW, 415 IB_WC_BIND_MW,
416 IB_WC_LSO,
414/* 417/*
415 * Set value of IB_WC_RECV so consumers can test if a completion is a 418 * Set value of IB_WC_RECV so consumers can test if a completion is a
416 * receive by testing (opcode & IB_WC_RECV). 419 * receive by testing (opcode & IB_WC_RECV).
@@ -495,6 +498,10 @@ enum ib_qp_type {
495 IB_QPT_RAW_ETY 498 IB_QPT_RAW_ETY
496}; 499};
497 500
501enum ib_qp_create_flags {
502 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
503};
504
498struct ib_qp_init_attr { 505struct ib_qp_init_attr {
499 void (*event_handler)(struct ib_event *, void *); 506 void (*event_handler)(struct ib_event *, void *);
500 void *qp_context; 507 void *qp_context;
@@ -504,6 +511,7 @@ struct ib_qp_init_attr {
504 struct ib_qp_cap cap; 511 struct ib_qp_cap cap;
505 enum ib_sig_type sq_sig_type; 512 enum ib_sig_type sq_sig_type;
506 enum ib_qp_type qp_type; 513 enum ib_qp_type qp_type;
514 enum ib_qp_create_flags create_flags;
507 u8 port_num; /* special QP types only */ 515 u8 port_num; /* special QP types only */
508}; 516};
509 517
@@ -617,7 +625,9 @@ enum ib_wr_opcode {
617 IB_WR_SEND_WITH_IMM, 625 IB_WR_SEND_WITH_IMM,
618 IB_WR_RDMA_READ, 626 IB_WR_RDMA_READ,
619 IB_WR_ATOMIC_CMP_AND_SWP, 627 IB_WR_ATOMIC_CMP_AND_SWP,
620 IB_WR_ATOMIC_FETCH_AND_ADD 628 IB_WR_ATOMIC_FETCH_AND_ADD,
629 IB_WR_LSO,
630 IB_WR_SEND_WITH_INV,
621}; 631};
622 632
623enum ib_send_flags { 633enum ib_send_flags {
@@ -641,7 +651,10 @@ struct ib_send_wr {
641 int num_sge; 651 int num_sge;
642 enum ib_wr_opcode opcode; 652 enum ib_wr_opcode opcode;
643 int send_flags; 653 int send_flags;
644 __be32 imm_data; 654 union {
655 __be32 imm_data;
656 u32 invalidate_rkey;
657 } ex;
645 union { 658 union {
646 struct { 659 struct {
647 u64 remote_addr; 660 u64 remote_addr;
@@ -655,6 +668,9 @@ struct ib_send_wr {
655 } atomic; 668 } atomic;
656 struct { 669 struct {
657 struct ib_ah *ah; 670 struct ib_ah *ah;
671 void *header;
672 int hlen;
673 int mss;
658 u32 remote_qpn; 674 u32 remote_qpn;
659 u32 remote_qkey; 675 u32 remote_qkey;
660 u16 pkey_index; /* valid for GSI only */ 676 u16 pkey_index; /* valid for GSI only */
@@ -730,7 +746,7 @@ struct ib_uobject {
730 struct ib_ucontext *context; /* associated user context */ 746 struct ib_ucontext *context; /* associated user context */
731 void *object; /* containing object */ 747 void *object; /* containing object */
732 struct list_head list; /* link to context's list */ 748 struct list_head list; /* link to context's list */
733 u32 id; /* index into kernel idr */ 749 int id; /* index into kernel idr */
734 struct kref ref; 750 struct kref ref;
735 struct rw_semaphore mutex; /* protects .live */ 751 struct rw_semaphore mutex; /* protects .live */
736 int live; 752 int live;
@@ -971,6 +987,8 @@ struct ib_device {
971 int comp_vector, 987 int comp_vector,
972 struct ib_ucontext *context, 988 struct ib_ucontext *context,
973 struct ib_udata *udata); 989 struct ib_udata *udata);
990 int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
991 u16 cq_period);
974 int (*destroy_cq)(struct ib_cq *cq); 992 int (*destroy_cq)(struct ib_cq *cq);
975 int (*resize_cq)(struct ib_cq *cq, int cqe, 993 int (*resize_cq)(struct ib_cq *cq, int cqe,
976 struct ib_udata *udata); 994 struct ib_udata *udata);
@@ -1376,6 +1394,15 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
1376int ib_resize_cq(struct ib_cq *cq, int cqe); 1394int ib_resize_cq(struct ib_cq *cq, int cqe);
1377 1395
1378/** 1396/**
1397 * ib_modify_cq - Modifies moderation params of the CQ
1398 * @cq: The CQ to modify.
1399 * @cq_count: number of CQEs that will trigger an event
1400 * @cq_period: max period of time in usec before triggering an event
1401 *
1402 */
1403int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
1404
1405/**
1379 * ib_destroy_cq - Destroys the specified CQ. 1406 * ib_destroy_cq - Destroys the specified CQ.
1380 * @cq: The CQ to destroy. 1407 * @cq: The CQ to destroy.
1381 */ 1408 */
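
The new ib_modify_cq() verb (wired down to MLX4_CMD_MODIFY_CQ in the mlx4 hunks above) lets a consumer retune completion-event moderation at runtime. A hedged example; the helper name and the 32-CQE/100-usec values are arbitrary:

#include <rdma/ib_verbs.h>

/* Hypothetical tuning call: raise a completion event only after
 * 32 CQEs or 100 usec, whichever comes first. */
static int demo_tune_cq(struct ib_cq *cq)
{
        return ib_modify_cq(cq, 32, 100);
}
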
diff --git a/init/Kconfig b/init/Kconfig
index a97924bc5b8d..7fccf09bb95a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -763,7 +763,7 @@ endmenu # General setup
763config SLABINFO 763config SLABINFO
764 bool 764 bool
765 depends on PROC_FS 765 depends on PROC_FS
766 depends on SLAB || SLUB 766 depends on SLAB || SLUB_DEBUG
767 default y 767 default y
768 768
769config RT_MUTEXES 769config RT_MUTEXES
diff --git a/kernel/Makefile b/kernel/Makefile
index 6c584c55a6e9..f45c69e69688 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -8,7 +8,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
8 signal.o sys.o kmod.o workqueue.o pid.o \ 8 signal.o sys.o kmod.o workqueue.o pid.o \
9 rcupdate.o extable.o params.o posix-timers.o \ 9 rcupdate.o extable.o params.o posix-timers.o \
10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ 10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
11 hrtimer.o rwsem.o nsproxy.o srcu.o \ 11 hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
12 notifier.o ksysfs.o pm_qos_params.o 12 notifier.o ksysfs.o pm_qos_params.o
13 13
14obj-$(CONFIG_SYSCTL) += sysctl_check.o 14obj-$(CONFIG_SYSCTL) += sysctl_check.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2727f9238359..6d8de051382b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1722,7 +1722,12 @@ void cgroup_enable_task_cg_lists(void)
1722 use_task_css_set_links = 1; 1722 use_task_css_set_links = 1;
1723 do_each_thread(g, p) { 1723 do_each_thread(g, p) {
1724 task_lock(p); 1724 task_lock(p);
1725 if (list_empty(&p->cg_list)) 1725 /*
1726 * We must check whether the process is exiting; otherwise
1727 * this races with cgroup_exit(), and the list entry
1728 * won't be deleted even though the process has exited.
1729 */
1730 if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
1726 list_add(&p->cg_list, &p->cgroups->tasks); 1731 list_add(&p->cg_list, &p->cgroups->tasks);
1727 task_unlock(p); 1732 task_unlock(p);
1728 } while_each_thread(g, p); 1733 } while_each_thread(g, p);
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
new file mode 100644
index 000000000000..5c2942e768cd
--- /dev/null
+++ b/kernel/semaphore.c
@@ -0,0 +1,264 @@
1/*
2 * Copyright (c) 2008 Intel Corporation
3 * Author: Matthew Wilcox <willy@linux.intel.com>
4 *
5 * Distributed under the terms of the GNU GPL, version 2
6 *
7 * This file implements counting semaphores.
8 * A counting semaphore may be acquired 'n' times before sleeping.
9 * See mutex.c for single-acquisition sleeping locks which enforce
10 * rules which allow code to be debugged more easily.
11 */
12
13/*
14 * Some notes on the implementation:
15 *
16 * The spinlock controls access to the other members of the semaphore.
17 * down_trylock() and up() can be called from interrupt context, so we
18 * have to disable interrupts when taking the lock. It turns out various
19 * parts of the kernel expect to be able to use down() on a semaphore in
20 * interrupt context when they know it will succeed, so we have to use
21 * irqsave variants for down(), down_interruptible() and down_killable()
22 * too.
23 *
24 * The ->count variable represents how many more tasks can acquire this
25 * semaphore. If it's zero, there may be tasks waiting on the wait_list.
26 */
27
28#include <linux/compiler.h>
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/sched.h>
32#include <linux/semaphore.h>
33#include <linux/spinlock.h>
34
35static noinline void __down(struct semaphore *sem);
36static noinline int __down_interruptible(struct semaphore *sem);
37static noinline int __down_killable(struct semaphore *sem);
38static noinline int __down_timeout(struct semaphore *sem, long jiffies);
39static noinline void __up(struct semaphore *sem);
40
41/**
42 * down - acquire the semaphore
43 * @sem: the semaphore to be acquired
44 *
45 * Acquires the semaphore. If no more tasks are allowed to acquire the
46 * semaphore, calling this function will put the task to sleep until the
47 * semaphore is released.
48 *
49 * Use of this function is deprecated, please use down_interruptible() or
50 * down_killable() instead.
51 */
52void down(struct semaphore *sem)
53{
54 unsigned long flags;
55
56 spin_lock_irqsave(&sem->lock, flags);
57 if (likely(sem->count > 0))
58 sem->count--;
59 else
60 __down(sem);
61 spin_unlock_irqrestore(&sem->lock, flags);
62}
63EXPORT_SYMBOL(down);
64
65/**
66 * down_interruptible - acquire the semaphore unless interrupted
67 * @sem: the semaphore to be acquired
68 *
69 * Attempts to acquire the semaphore. If no more tasks are allowed to
70 * acquire the semaphore, calling this function will put the task to sleep.
71 * If the sleep is interrupted by a signal, this function will return -EINTR.
72 * If the semaphore is successfully acquired, this function returns 0.
73 */
74int down_interruptible(struct semaphore *sem)
75{
76 unsigned long flags;
77 int result = 0;
78
79 spin_lock_irqsave(&sem->lock, flags);
80 if (likely(sem->count > 0))
81 sem->count--;
82 else
83 result = __down_interruptible(sem);
84 spin_unlock_irqrestore(&sem->lock, flags);
85
86 return result;
87}
88EXPORT_SYMBOL(down_interruptible);
89
90/**
91 * down_killable - acquire the semaphore unless killed
92 * @sem: the semaphore to be acquired
93 *
94 * Attempts to acquire the semaphore. If no more tasks are allowed to
95 * acquire the semaphore, calling this function will put the task to sleep.
96 * If the sleep is interrupted by a fatal signal, this function will return
97 * -EINTR. If the semaphore is successfully acquired, this function returns
98 * 0.
99 */
100int down_killable(struct semaphore *sem)
101{
102 unsigned long flags;
103 int result = 0;
104
105 spin_lock_irqsave(&sem->lock, flags);
106 if (likely(sem->count > 0))
107 sem->count--;
108 else
109 result = __down_killable(sem);
110 spin_unlock_irqrestore(&sem->lock, flags);
111
112 return result;
113}
114EXPORT_SYMBOL(down_killable);
115
116/**
117 * down_trylock - try to acquire the semaphore, without waiting
118 * @sem: the semaphore to be acquired
119 *
120 * Try to acquire the semaphore atomically. Returns 0 if the semaphore has
121 * been acquired successfully or 1 if it cannot be acquired.
122 *
123 * NOTE: This return value is inverted from both spin_trylock and
124 * mutex_trylock! Be careful about this when converting code.
125 *
126 * Unlike mutex_trylock, this function can be used from interrupt context,
127 * and the semaphore can be released by any task or interrupt.
128 */
129int down_trylock(struct semaphore *sem)
130{
131 unsigned long flags;
132 int count;
133
134 spin_lock_irqsave(&sem->lock, flags);
135 count = sem->count - 1;
136 if (likely(count >= 0))
137 sem->count = count;
138 spin_unlock_irqrestore(&sem->lock, flags);
139
140 return (count < 0);
141}
142EXPORT_SYMBOL(down_trylock);
143
144/**
145 * down_timeout - acquire the semaphore within a specified time
146 * @sem: the semaphore to be acquired
147 * @jiffies: how long to wait before failing
148 *
149 * Attempts to acquire the semaphore. If no more tasks are allowed to
150 * acquire the semaphore, calling this function will put the task to sleep.
151 * If the semaphore is not released within the specified number of jiffies,
152 * this function returns -ETIME. It returns 0 if the semaphore was acquired.
153 */
154int down_timeout(struct semaphore *sem, long jiffies)
155{
156 unsigned long flags;
157 int result = 0;
158
159 spin_lock_irqsave(&sem->lock, flags);
160 if (likely(sem->count > 0))
161 sem->count--;
162 else
163 result = __down_timeout(sem, jiffies);
164 spin_unlock_irqrestore(&sem->lock, flags);
165
166 return result;
167}
168EXPORT_SYMBOL(down_timeout);
169
170/**
171 * up - release the semaphore
172 * @sem: the semaphore to release
173 *
174 * Release the semaphore. Unlike mutexes, up() may be called from any
175 * context and even by tasks which have never called down().
176 */
177void up(struct semaphore *sem)
178{
179 unsigned long flags;
180
181 spin_lock_irqsave(&sem->lock, flags);
182 if (likely(list_empty(&sem->wait_list)))
183 sem->count++;
184 else
185 __up(sem);
186 spin_unlock_irqrestore(&sem->lock, flags);
187}
188EXPORT_SYMBOL(up);
189
190/* Functions for the contended case */
191
192struct semaphore_waiter {
193 struct list_head list;
194 struct task_struct *task;
195 int up;
196};
197
198/*
199 * Because this function is inlined, the 'state' parameter will be
200 * constant, and thus optimised away by the compiler. Likewise the
201 * 'timeout' parameter for the cases without timeouts.
202 */
203static inline int __sched __down_common(struct semaphore *sem, long state,
204 long timeout)
205{
206 struct task_struct *task = current;
207 struct semaphore_waiter waiter;
208
209 list_add_tail(&waiter.list, &sem->wait_list);
210 waiter.task = task;
211 waiter.up = 0;
212
213 for (;;) {
214 if (state == TASK_INTERRUPTIBLE && signal_pending(task))
215 goto interrupted;
216 if (state == TASK_KILLABLE && fatal_signal_pending(task))
217 goto interrupted;
218 if (timeout <= 0)
219 goto timed_out;
220 __set_task_state(task, state);
221 spin_unlock_irq(&sem->lock);
222 timeout = schedule_timeout(timeout);
223 spin_lock_irq(&sem->lock);
224 if (waiter.up)
225 return 0;
226 }
227
228 timed_out:
229 list_del(&waiter.list);
230 return -ETIME;
231
232 interrupted:
233 list_del(&waiter.list);
234 return -EINTR;
235}
236
237static noinline void __sched __down(struct semaphore *sem)
238{
239 __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
240}
241
242static noinline int __sched __down_interruptible(struct semaphore *sem)
243{
244 return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
245}
246
247static noinline int __sched __down_killable(struct semaphore *sem)
248{
249 return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
250}
251
252static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
253{
254 return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
255}
256
257static noinline void __sched __up(struct semaphore *sem)
258{
259 struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
260 struct semaphore_waiter, list);
261 list_del(&waiter->list);
262 waiter->up = 1;
263 wake_up_process(waiter->task);
264}
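
Two semantics documented in the new file are easy to get backwards when converting callers: down_trylock() returns 0 on success, the opposite of mutex_trylock() and spin_trylock(), and down_timeout() takes jiffies rather than milliseconds. A short sketch under those rules; the semaphore and the 100 ms figure are illustrative:

#include <linux/semaphore.h>
#include <linux/jiffies.h>

static DECLARE_MUTEX(demo_sem);

static void demo_acquire(void)
{
        /* 0 means acquired -- note the inversion vs. mutex_trylock() */
        if (down_trylock(&demo_sem) == 0) {
                /* ... got it without sleeping ... */
                up(&demo_sem);
                return;
        }

        /* Block for at most 100 ms; -ETIME on timeout, 0 on success. */
        if (down_timeout(&demo_sem, msecs_to_jiffies(100)) == 0)
                up(&demo_sem);
}
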
diff --git a/kernel/signal.c b/kernel/signal.c
index 6af1210092c3..cc8303cd093d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1757,6 +1757,45 @@ static int do_signal_stop(int signr)
1757 return 1; 1757 return 1;
1758} 1758}
1759 1759
1760static int ptrace_signal(int signr, siginfo_t *info,
1761 struct pt_regs *regs, void *cookie)
1762{
1763 if (!(current->ptrace & PT_PTRACED))
1764 return signr;
1765
1766 ptrace_signal_deliver(regs, cookie);
1767
1768 /* Let the debugger run. */
1769 ptrace_stop(signr, 0, info);
1770
1771 /* We're back. Did the debugger cancel the sig? */
1772 signr = current->exit_code;
1773 if (signr == 0)
1774 return signr;
1775
1776 current->exit_code = 0;
1777
1778 /* Update the siginfo structure if the signal has
1779 changed. If the debugger wanted something
1780 specific in the siginfo structure then it should
1781 have updated *info via PTRACE_SETSIGINFO. */
1782 if (signr != info->si_signo) {
1783 info->si_signo = signr;
1784 info->si_errno = 0;
1785 info->si_code = SI_USER;
1786 info->si_pid = task_pid_vnr(current->parent);
1787 info->si_uid = current->parent->uid;
1788 }
1789
1790 /* If the (new) signal is now blocked, requeue it. */
1791 if (sigismember(&current->blocked, signr)) {
1792 specific_send_sig_info(signr, info, current);
1793 signr = 0;
1794 }
1795
1796 return signr;
1797}
1798
1760int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, 1799int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1761 struct pt_regs *regs, void *cookie) 1800 struct pt_regs *regs, void *cookie)
1762{ 1801{
@@ -1785,36 +1824,10 @@ relock:
1785 if (!signr) 1824 if (!signr)
1786 break; /* will return 0 */ 1825 break; /* will return 0 */
1787 1826
1788 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) { 1827 if (signr != SIGKILL) {
1789 ptrace_signal_deliver(regs, cookie); 1828 signr = ptrace_signal(signr, info, regs, cookie);
1790 1829 if (!signr)
1791 /* Let the debugger run. */
1792 ptrace_stop(signr, 0, info);
1793
1794 /* We're back. Did the debugger cancel the sig? */
1795 signr = current->exit_code;
1796 if (signr == 0)
1797 continue;
1798
1799 current->exit_code = 0;
1800
1801 /* Update the siginfo structure if the signal has
1802 changed. If the debugger wanted something
1803 specific in the siginfo structure then it should
1804 have updated *info via PTRACE_SETSIGINFO. */
1805 if (signr != info->si_signo) {
1806 info->si_signo = signr;
1807 info->si_errno = 0;
1808 info->si_code = SI_USER;
1809 info->si_pid = task_pid_vnr(current->parent);
1810 info->si_uid = current->parent->uid;
1811 }
1812
1813 /* If the (new) signal is now blocked, requeue it. */
1814 if (sigismember(&current->blocked, signr)) {
1815 specific_send_sig_info(signr, info, current);
1816 continue; 1830 continue;
1817 }
1818 } 1831 }
1819 1832
1820 ka = &current->sighand->action[signr-1]; 1833 ka = &current->sighand->action[signr-1];
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index e1bd50cbbf5d..fdfa0c745bb6 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -14,7 +14,7 @@
14#include <linux/cpu.h> 14#include <linux/cpu.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/hrtimer.h> 16#include <linux/hrtimer.h>
17#include <linux/irq.h> 17#include <linux/interrupt.h>
18#include <linux/percpu.h> 18#include <linux/percpu.h>
19#include <linux/profile.h> 19#include <linux/profile.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 1bea399a9ef0..4f3886562b8c 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -14,12 +14,14 @@
14#include <linux/cpu.h> 14#include <linux/cpu.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/hrtimer.h> 16#include <linux/hrtimer.h>
17#include <linux/irq.h> 17#include <linux/interrupt.h>
18#include <linux/percpu.h> 18#include <linux/percpu.h>
19#include <linux/profile.h> 19#include <linux/profile.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/tick.h> 21#include <linux/tick.h>
22 22
23#include <asm/irq_regs.h>
24
23#include "tick-internal.h" 25#include "tick-internal.h"
24 26
25/* 27/*
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 0258d3115d54..450c04935b66 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -14,7 +14,7 @@
14#include <linux/cpu.h> 14#include <linux/cpu.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/hrtimer.h> 16#include <linux/hrtimer.h>
17#include <linux/irq.h> 17#include <linux/interrupt.h>
18#include <linux/percpu.h> 18#include <linux/percpu.h>
19#include <linux/profile.h> 19#include <linux/profile.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0796c1a090c0..80db357b0a42 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -211,7 +211,7 @@ config SLUB_DEBUG_ON
211config SLUB_STATS 211config SLUB_STATS
212 default n 212 default n
213 bool "Enable SLUB performance statistics" 213 bool "Enable SLUB performance statistics"
214 depends on SLUB 214 depends on SLUB && SLUB_DEBUG && SYSFS
215 help 215 help
216 SLUB statistics are useful to debug SLUB's allocation behavior in 216 SLUB statistics are useful to debug SLUB's allocation behavior in
217 order to find ways to optimize the allocator. This should never be 217 order to find ways to optimize the allocator. This should never be
@@ -265,16 +265,6 @@ config DEBUG_MUTEXES
265 This feature allows mutex semantics violations to be detected and 265 This feature allows mutex semantics violations to be detected and
266 reported. 266 reported.
267 267
268config DEBUG_SEMAPHORE
269 bool "Semaphore debugging"
270 depends on DEBUG_KERNEL
271 depends on ALPHA || FRV
272 default n
273 help
274 If you say Y here then semaphore processing will issue lots of
275 verbose debugging messages. If you suspect a semaphore problem or a
276 kernel hacker asks for this option then say Y. Otherwise say N.
277
278config DEBUG_LOCK_ALLOC 268config DEBUG_LOCK_ALLOC
279 bool "Lock debugging: detect incorrect freeing of live locks" 269 bool "Lock debugging: detect incorrect freeing of live locks"
280 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 270 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/Makefile b/lib/Makefile
index 23de261a4c83..28dba90d5020 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
29obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o 29obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
30lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o 30lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
31lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o 31lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
32lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
33lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o 32lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
34obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 33obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
35obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o 34obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 812dbf00844b..fbc11a336bc5 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -8,6 +8,7 @@
8#include <linux/smp_lock.h> 8#include <linux/smp_lock.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/kallsyms.h> 10#include <linux/kallsyms.h>
11#include <asm/semaphore.h>
11 12
12/* 13/*
13 * The 'big kernel semaphore' 14 * The 'big kernel semaphore'
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
deleted file mode 100644
index 0198782cdacb..000000000000
--- a/lib/semaphore-sleepers.c
+++ /dev/null
@@ -1,176 +0,0 @@
1/*
2 * i386 and x86-64 semaphore implementation.
3 *
4 * (C) Copyright 1999 Linus Torvalds
5 *
6 * Portions Copyright 1999 Red Hat, Inc.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
14 */
15#include <linux/sched.h>
16#include <linux/err.h>
17#include <linux/init.h>
18#include <asm/semaphore.h>
19
20/*
21 * Semaphores are implemented using a two-way counter:
22 * The "count" variable is decremented for each process
23 * that tries to acquire the semaphore, while the "sleeping"
24 * variable is a count of such acquires.
25 *
26 * Notably, the inline "up()" and "down()" functions can
27 * efficiently test if they need to do any extra work (up
28 * needs to do something only if count was negative before
29 * the increment operation).
30 *
31 * "sleeping" and the contention routine ordering is protected
32 * by the spinlock in the semaphore's waitqueue head.
33 *
34 * Note that these functions are only called when there is
35 * contention on the lock, and as such all this is the
36 * "non-critical" part of the whole semaphore business. The
37 * critical part is the inline stuff in <asm/semaphore.h>
38 * where we want to avoid any extra jumps and calls.
39 */
40
41/*
42 * Logic:
43 * - only on a boundary condition do we need to care. When we go
44 * from a negative count to a non-negative, we wake people up.
45 * - when we go from a non-negative count to a negative do we
46 * (a) synchronize with the "sleeper" count and (b) make sure
47 * that we're on the wakeup list before we synchronize so that
48 * we cannot lose wakeup events.
49 */
50
51void __up(struct semaphore *sem)
52{
53 wake_up(&sem->wait);
54}
55
56void __sched __down(struct semaphore *sem)
57{
58 struct task_struct *tsk = current;
59 DECLARE_WAITQUEUE(wait, tsk);
60 unsigned long flags;
61
62 tsk->state = TASK_UNINTERRUPTIBLE;
63 spin_lock_irqsave(&sem->wait.lock, flags);
64 add_wait_queue_exclusive_locked(&sem->wait, &wait);
65
66 sem->sleepers++;
67 for (;;) {
68 int sleepers = sem->sleepers;
69
70 /*
71 * Add "everybody else" into it. They aren't
72 * playing, because we own the spinlock in
73 * the wait_queue_head.
74 */
75 if (!atomic_add_negative(sleepers - 1, &sem->count)) {
76 sem->sleepers = 0;
77 break;
78 }
79 sem->sleepers = 1; /* us - see -1 above */
80 spin_unlock_irqrestore(&sem->wait.lock, flags);
81
82 schedule();
83
84 spin_lock_irqsave(&sem->wait.lock, flags);
85 tsk->state = TASK_UNINTERRUPTIBLE;
86 }
87 remove_wait_queue_locked(&sem->wait, &wait);
88 wake_up_locked(&sem->wait);
89 spin_unlock_irqrestore(&sem->wait.lock, flags);
90 tsk->state = TASK_RUNNING;
91}
92
93int __sched __down_interruptible(struct semaphore *sem)
94{
95 int retval = 0;
96 struct task_struct *tsk = current;
97 DECLARE_WAITQUEUE(wait, tsk);
98 unsigned long flags;
99
100 tsk->state = TASK_INTERRUPTIBLE;
101 spin_lock_irqsave(&sem->wait.lock, flags);
102 add_wait_queue_exclusive_locked(&sem->wait, &wait);
103
104 sem->sleepers++;
105 for (;;) {
106 int sleepers = sem->sleepers;
107
108 /*
109 * With signals pending, this turns into
110 * the trylock failure case - we won't be
111 * sleeping, and we can't get the lock as
112 * it has contention. Just correct the count
113 * and exit.
114 */
115 if (signal_pending(current)) {
116 retval = -EINTR;
117 sem->sleepers = 0;
118 atomic_add(sleepers, &sem->count);
119 break;
120 }
121
122 /*
123 * Add "everybody else" into it. They aren't
124 * playing, because we own the spinlock in
125 * wait_queue_head. The "-1" is because we're
126 * still hoping to get the semaphore.
127 */
128 if (!atomic_add_negative(sleepers - 1, &sem->count)) {
129 sem->sleepers = 0;
130 break;
131 }
132 sem->sleepers = 1; /* us - see -1 above */
133 spin_unlock_irqrestore(&sem->wait.lock, flags);
134
135 schedule();
136
137 spin_lock_irqsave(&sem->wait.lock, flags);
138 tsk->state = TASK_INTERRUPTIBLE;
139 }
140 remove_wait_queue_locked(&sem->wait, &wait);
141 wake_up_locked(&sem->wait);
142 spin_unlock_irqrestore(&sem->wait.lock, flags);
143
144 tsk->state = TASK_RUNNING;
145 return retval;
146}
147
148/*
149 * Trylock failed - make sure we correct for
150 * having decremented the count.
151 *
152 * We could have done the trylock with a
153 * single "cmpxchg" without failure cases,
154 * but then it wouldn't work on a 386.
155 */
156int __down_trylock(struct semaphore *sem)
157{
158 int sleepers;
159 unsigned long flags;
160
161 spin_lock_irqsave(&sem->wait.lock, flags);
162 sleepers = sem->sleepers + 1;
163 sem->sleepers = 0;
164
165 /*
166 * Add "everybody else" and us into it. They aren't
167 * playing, because we own the spinlock in the
168 * wait_queue_head.
169 */
170 if (!atomic_add_negative(sleepers, &sem->count)) {
171 wake_up_locked(&sem->wait);
172 }
173
174 spin_unlock_irqrestore(&sem->wait.lock, flags);
175 return 1;
176}
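
The two-way counter removed above is subtle; a concrete trace of its arithmetic, starting from count = 1 (a mutex) with tasks A, B and C calling down() in turn:

A: down()   count 1 -> 0    not negative, A owns the semaphore
B: down()   count 0 -> -1   negative, __down(): sleepers = 1;
            atomic_add_negative(sleepers - 1 = 0, count) leaves -1,
            still negative -> sleepers = 1, B sleeps
C: down()   count -1 -> -2  negative, __down(): sleepers = 2;
            atomic_add_negative(1, count) gives -1,
            still negative -> sleepers = 1, C sleeps
A: up()     count -1 -> 0   result <= 0, __up() wakes one waiter
B: wakes    sleepers = 1; atomic_add_negative(0, count) leaves 0,
            not negative -> sleepers = 0, B now owns the semaphore
            (B's exit path also calls wake_up_locked(), so C re-checks:
            atomic_add_negative(-1, count) gives -1, and C sleeps again
            until B calls up())
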
diff --git a/mm/slub.c b/mm/slub.c
index acc975fcc8cc..7f8aaa291a4e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -837,6 +837,35 @@ static void remove_full(struct kmem_cache *s, struct page *page)
837 spin_unlock(&n->list_lock); 837 spin_unlock(&n->list_lock);
838} 838}
839 839
840/* Tracking of the number of slabs for debugging purposes */
841static inline unsigned long slabs_node(struct kmem_cache *s, int node)
842{
843 struct kmem_cache_node *n = get_node(s, node);
844
845 return atomic_long_read(&n->nr_slabs);
846}
847
848static inline void inc_slabs_node(struct kmem_cache *s, int node)
849{
850 struct kmem_cache_node *n = get_node(s, node);
851
852 /*
853 * May be called early in order to allocate a slab for the
854 * kmem_cache_node structure. Solve the chicken-egg
855 * dilemma by deferring the increment of the count during
856 * bootstrap (see early_kmem_cache_node_alloc).
857 */
858 if (!NUMA_BUILD || n)
859 atomic_long_inc(&n->nr_slabs);
860}
861static inline void dec_slabs_node(struct kmem_cache *s, int node)
862{
863 struct kmem_cache_node *n = get_node(s, node);
864
865 atomic_long_dec(&n->nr_slabs);
866}
867
868/* Object debug checks for alloc/free paths */
840static void setup_object_debug(struct kmem_cache *s, struct page *page, 869static void setup_object_debug(struct kmem_cache *s, struct page *page,
841 void *object) 870 void *object)
842{ 871{
@@ -1028,6 +1057,11 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
1028 return flags; 1057 return flags;
1029} 1058}
1030#define slub_debug 0 1059#define slub_debug 0
1060
1061static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1062 { return 0; }
1063static inline void inc_slabs_node(struct kmem_cache *s, int node) {}
1064static inline void dec_slabs_node(struct kmem_cache *s, int node) {}
1031#endif 1065#endif
1032/* 1066/*
1033 * Slab allocation and freeing 1067 * Slab allocation and freeing
@@ -1066,7 +1100,6 @@ static void setup_object(struct kmem_cache *s, struct page *page,
1066static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) 1100static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1067{ 1101{
1068 struct page *page; 1102 struct page *page;
1069 struct kmem_cache_node *n;
1070 void *start; 1103 void *start;
1071 void *last; 1104 void *last;
1072 void *p; 1105 void *p;
@@ -1078,9 +1111,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1078 if (!page) 1111 if (!page)
1079 goto out; 1112 goto out;
1080 1113
1081 n = get_node(s, page_to_nid(page)); 1114 inc_slabs_node(s, page_to_nid(page));
1082 if (n)
1083 atomic_long_inc(&n->nr_slabs);
1084 page->slab = s; 1115 page->slab = s;
1085 page->flags |= 1 << PG_slab; 1116 page->flags |= 1 << PG_slab;
1086 if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON | 1117 if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -1125,6 +1156,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
1125 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1156 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1126 -pages); 1157 -pages);
1127 1158
1159 __ClearPageSlab(page);
1160 reset_page_mapcount(page);
1128 __free_pages(page, s->order); 1161 __free_pages(page, s->order);
1129} 1162}
1130 1163
@@ -1151,11 +1184,7 @@ static void free_slab(struct kmem_cache *s, struct page *page)
1151 1184
1152static void discard_slab(struct kmem_cache *s, struct page *page) 1185static void discard_slab(struct kmem_cache *s, struct page *page)
1153{ 1186{
1154 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1187 dec_slabs_node(s, page_to_nid(page));
1155
1156 atomic_long_dec(&n->nr_slabs);
1157 reset_page_mapcount(page);
1158 __ClearPageSlab(page);
1159 free_slab(s, page); 1188 free_slab(s, page);
1160} 1189}
1161 1190
@@ -1886,15 +1915,18 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
1886 c->node = 0; 1915 c->node = 0;
1887 c->offset = s->offset / sizeof(void *); 1916 c->offset = s->offset / sizeof(void *);
1888 c->objsize = s->objsize; 1917 c->objsize = s->objsize;
1918#ifdef CONFIG_SLUB_STATS
1919 memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
1920#endif
1889} 1921}
1890 1922
1891static void init_kmem_cache_node(struct kmem_cache_node *n) 1923static void init_kmem_cache_node(struct kmem_cache_node *n)
1892{ 1924{
1893 n->nr_partial = 0; 1925 n->nr_partial = 0;
1894 atomic_long_set(&n->nr_slabs, 0);
1895 spin_lock_init(&n->list_lock); 1926 spin_lock_init(&n->list_lock);
1896 INIT_LIST_HEAD(&n->partial); 1927 INIT_LIST_HEAD(&n->partial);
1897#ifdef CONFIG_SLUB_DEBUG 1928#ifdef CONFIG_SLUB_DEBUG
1929 atomic_long_set(&n->nr_slabs, 0);
1898 INIT_LIST_HEAD(&n->full); 1930 INIT_LIST_HEAD(&n->full);
1899#endif 1931#endif
1900} 1932}
@@ -2063,7 +2095,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
2063 init_tracking(kmalloc_caches, n); 2095 init_tracking(kmalloc_caches, n);
2064#endif 2096#endif
2065 init_kmem_cache_node(n); 2097 init_kmem_cache_node(n);
2066 atomic_long_inc(&n->nr_slabs); 2098 inc_slabs_node(kmalloc_caches, node);
2067 2099
2068 /* 2100 /*
2069 * lockdep requires consistent irq usage for each lock 2101 * lockdep requires consistent irq usage for each lock
@@ -2376,7 +2408,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
2376 struct kmem_cache_node *n = get_node(s, node); 2408 struct kmem_cache_node *n = get_node(s, node);
2377 2409
2378 n->nr_partial -= free_list(s, n, &n->partial); 2410 n->nr_partial -= free_list(s, n, &n->partial);
2379 if (atomic_long_read(&n->nr_slabs)) 2411 if (slabs_node(s, node))
2380 return 1; 2412 return 1;
2381 } 2413 }
2382 free_kmem_cache_nodes(s); 2414 free_kmem_cache_nodes(s);
@@ -2409,10 +2441,6 @@ EXPORT_SYMBOL(kmem_cache_destroy);
2409struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned; 2441struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
2410EXPORT_SYMBOL(kmalloc_caches); 2442EXPORT_SYMBOL(kmalloc_caches);
2411 2443
2412#ifdef CONFIG_ZONE_DMA
2413static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
2414#endif
2415
2416static int __init setup_slub_min_order(char *str) 2444static int __init setup_slub_min_order(char *str)
2417{ 2445{
2418 get_option(&str, &slub_min_order); 2446 get_option(&str, &slub_min_order);
@@ -2472,6 +2500,7 @@ panic:
2472} 2500}
2473 2501
2474#ifdef CONFIG_ZONE_DMA 2502#ifdef CONFIG_ZONE_DMA
2503static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
2475 2504
2476static void sysfs_add_func(struct work_struct *w) 2505static void sysfs_add_func(struct work_struct *w)
2477{ 2506{
@@ -2688,21 +2717,6 @@ void kfree(const void *x)
2688} 2717}
2689EXPORT_SYMBOL(kfree); 2718EXPORT_SYMBOL(kfree);
2690 2719
2691#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
2692static unsigned long count_partial(struct kmem_cache_node *n)
2693{
2694 unsigned long flags;
2695 unsigned long x = 0;
2696 struct page *page;
2697
2698 spin_lock_irqsave(&n->list_lock, flags);
2699 list_for_each_entry(page, &n->partial, lru)
2700 x += page->inuse;
2701 spin_unlock_irqrestore(&n->list_lock, flags);
2702 return x;
2703}
2704#endif
2705
2706/* 2720/*
2707 * kmem_cache_shrink removes empty slabs from the partial lists and sorts 2721 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2708 * the remaining slabs by the number of items in use. The slabs with the 2722 * the remaining slabs by the number of items in use. The slabs with the
@@ -2816,7 +2830,7 @@ static void slab_mem_offline_callback(void *arg)
2816	 * and offline_pages() function shouldn't call this	2830	 * and offline_pages() function shouldn't call this
2817 * callback. So, we must fail. 2831 * callback. So, we must fail.
2818 */ 2832 */
2819 BUG_ON(atomic_long_read(&n->nr_slabs)); 2833 BUG_ON(slabs_node(s, offline_node));
2820 2834
2821 s->node[offline_node] = NULL; 2835 s->node[offline_node] = NULL;
2822 kmem_cache_free(kmalloc_caches, n); 2836 kmem_cache_free(kmalloc_caches, n);
@@ -3181,6 +3195,21 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3181 return slab_alloc(s, gfpflags, node, caller); 3195 return slab_alloc(s, gfpflags, node, caller);
3182} 3196}
3183 3197
3198#if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
3199static unsigned long count_partial(struct kmem_cache_node *n)
3200{
3201 unsigned long flags;
3202 unsigned long x = 0;
3203 struct page *page;
3204
3205 spin_lock_irqsave(&n->list_lock, flags);
3206 list_for_each_entry(page, &n->partial, lru)
3207 x += page->inuse;
3208 spin_unlock_irqrestore(&n->list_lock, flags);
3209 return x;
3210}
3211#endif
3212
3184#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG) 3213#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
3185static int validate_slab(struct kmem_cache *s, struct page *page, 3214static int validate_slab(struct kmem_cache *s, struct page *page,
3186 unsigned long *map) 3215 unsigned long *map)
@@ -3979,10 +4008,12 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
3979 4008
3980 len = sprintf(buf, "%lu", sum); 4009 len = sprintf(buf, "%lu", sum);
3981 4010
4011#ifdef CONFIG_SMP
3982 for_each_online_cpu(cpu) { 4012 for_each_online_cpu(cpu) {
3983 if (data[cpu] && len < PAGE_SIZE - 20) 4013 if (data[cpu] && len < PAGE_SIZE - 20)
3984 len += sprintf(buf + len, " c%d=%u", cpu, data[cpu]); 4014 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
3985 } 4015 }
4016#endif
3986 kfree(data); 4017 kfree(data);
3987 return len + sprintf(buf + len, "\n"); 4018 return len + sprintf(buf + len, "\n");
3988} 4019}
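The hunks above replace the open-coded nr_slabs accounting with slabs_node()/inc_slabs_node()/dec_slabs_node(). Those helpers are defined elsewhere in this patch, so the bodies below are a sketch inferred from the call sites, not the patch itself; under !CONFIG_SLUB_DEBUG they would presumably compile to no-ops so the counters disappear entirely:

	/* Sketch only: assumed helper definitions in mm/slub.c. */
	static inline unsigned long slabs_node(struct kmem_cache *s, int node)
	{
		struct kmem_cache_node *n = get_node(s, node);

		return atomic_long_read(&n->nr_slabs);
	}

	static inline void inc_slabs_node(struct kmem_cache *s, int node)
	{
		struct kmem_cache_node *n = get_node(s, node);

		/*
		 * May run before the node structure exists during the
		 * early_kmem_cache_node_alloc() bootstrap, hence the check.
		 */
		if (likely(n))
			atomic_long_inc(&n->nr_slabs);
	}

	static inline void dec_slabs_node(struct kmem_cache *s, int node)
	{
		struct kmem_cache_node *n = get_node(s, node);

		atomic_long_dec(&n->nr_slabs);
	}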
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index ffbf22a1d2ca..8ea283ecc522 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1573,7 +1573,6 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
1573 send_wr.sg_list = req->rl_send_iov; 1573 send_wr.sg_list = req->rl_send_iov;
1574 send_wr.num_sge = req->rl_niovs; 1574 send_wr.num_sge = req->rl_niovs;
1575 send_wr.opcode = IB_WR_SEND; 1575 send_wr.opcode = IB_WR_SEND;
1576 send_wr.imm_data = 0;
1577 if (send_wr.num_sge == 4) /* no need to sync any pad (constant) */ 1576 if (send_wr.num_sge == 4) /* no need to sync any pad (constant) */
1578 ib_dma_sync_single_for_device(ia->ri_id->device, 1577 ib_dma_sync_single_for_device(ia->ri_id->device,
1579 req->rl_send_iov[3].addr, req->rl_send_iov[3].length, 1578 req->rl_send_iov[3].addr, req->rl_send_iov[3].length,
diff --git a/security/Kconfig b/security/Kconfig
index 5dfc206748cf..49b51f964897 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -113,10 +113,12 @@ config SECURITY_DEFAULT_MMAP_MIN_ADDR
113 from userspace allocation. Keeping a user from writing to low pages 113 from userspace allocation. Keeping a user from writing to low pages
114 can help reduce the impact of kernel NULL pointer bugs. 114 can help reduce the impact of kernel NULL pointer bugs.
115 115
116 For most users with lots of address space a value of 65536 is 116 For most ia64, ppc64 and x86 users with lots of address space
117 reasonable and should cause no problems. Programs which use vm86 117 a value of 65536 is reasonable and should cause no problems.
118 functionality would either need additional permissions from either 118 On arm and other archs it should not be higher than 32768.
119 the LSM or the capabilities module or have this protection disabled. 119 Programs which use vm86 functionality would either need additional
120 permissions from either the LSM or the capabilities module or have
121 this protection disabled.
120 122
121 This value can be changed after boot using the 123 This value can be changed after boot using the
122 /proc/sys/vm/mmap_min_addr tunable. 124 /proc/sys/vm/mmap_min_addr tunable.
diff --git a/security/commoncap.c b/security/commoncap.c
index 06d5c9469ba3..852905789caf 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -267,7 +267,7 @@ static int get_file_caps(struct linux_binprm *bprm)
267 rc = cap_from_disk(&vcaps, bprm, rc); 267 rc = cap_from_disk(&vcaps, bprm, rc);
268 if (rc) 268 if (rc)
269 printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n", 269 printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n",
270 __FUNCTION__, rc, bprm->filename); 270 __func__, rc, bprm->filename);
271 271
272out: 272out:
273 dput(dentry); 273 dput(dentry);
@@ -302,7 +302,7 @@ int cap_bprm_set_security (struct linux_binprm *bprm)
302 ret = get_file_caps(bprm); 302 ret = get_file_caps(bprm);
303 if (ret) 303 if (ret)
304 printk(KERN_NOTICE "%s: get_file_caps returned %d for %s\n", 304 printk(KERN_NOTICE "%s: get_file_caps returned %d for %s\n",
305 __FUNCTION__, ret, bprm->filename); 305 __func__, ret, bprm->filename);
306 306
307 /* To support inheritance of root-permissions and suid-root 307 /* To support inheritance of root-permissions and suid-root
308 * executables under compatibility mode, we raise all three 308 * executables under compatibility mode, we raise all three
diff --git a/security/keys/internal.h b/security/keys/internal.h
index d36d69393356..7d894ef70370 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -22,16 +22,16 @@ void no_printk(const char *fmt, ...)
22 22
23#ifdef __KDEBUG 23#ifdef __KDEBUG
24#define kenter(FMT, ...) \ 24#define kenter(FMT, ...) \
25 printk(KERN_DEBUG "==> %s("FMT")\n", __FUNCTION__, ##__VA_ARGS__) 25 printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
26#define kleave(FMT, ...) \ 26#define kleave(FMT, ...) \
27 printk(KERN_DEBUG "<== %s()"FMT"\n", __FUNCTION__, ##__VA_ARGS__) 27 printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
28#define kdebug(FMT, ...) \ 28#define kdebug(FMT, ...) \
29 printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__) 29 printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
30#else 30#else
31#define kenter(FMT, ...) \ 31#define kenter(FMT, ...) \
32 no_printk(KERN_DEBUG "==> %s("FMT")\n", __FUNCTION__, ##__VA_ARGS__) 32 no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
33#define kleave(FMT, ...) \ 33#define kleave(FMT, ...) \
34 no_printk(KERN_DEBUG "<== %s()"FMT"\n", __FUNCTION__, ##__VA_ARGS__) 34 no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
35#define kdebug(FMT, ...) \ 35#define kdebug(FMT, ...) \
36 no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__) 36 no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
37#endif 37#endif
diff --git a/security/root_plug.c b/security/root_plug.c
index 870f13095bb6..6112d1404c81 100644
--- a/security/root_plug.c
+++ b/security/root_plug.c
@@ -49,7 +49,7 @@ module_param(debug, bool, 0600);
49 do { \ 49 do { \
50 if (debug) \ 50 if (debug) \
51 printk(KERN_DEBUG "%s: %s: " fmt , \ 51 printk(KERN_DEBUG "%s: %s: " fmt , \
52 MY_NAME , __FUNCTION__ , \ 52 MY_NAME , __func__ , \
53 ## arg); \ 53 ## arg); \
54 } while (0) 54 } while (0)
55 55
diff --git a/security/security.c b/security/security.c
index b1387a6b416d..9beecac933b4 100644
--- a/security/security.c
+++ b/security/security.c
@@ -57,7 +57,7 @@ int __init security_init(void)
57 57
58 if (verify(&dummy_security_ops)) { 58 if (verify(&dummy_security_ops)) {
59 printk(KERN_ERR "%s could not verify " 59 printk(KERN_ERR "%s could not verify "
60 "dummy_security_ops structure.\n", __FUNCTION__); 60 "dummy_security_ops structure.\n", __func__);
61 return -EIO; 61 return -EIO;
62 } 62 }
63 63
@@ -82,7 +82,7 @@ int register_security(struct security_operations *ops)
82{ 82{
83 if (verify(ops)) { 83 if (verify(ops)) {
84 printk(KERN_DEBUG "%s could not verify " 84 printk(KERN_DEBUG "%s could not verify "
85 "security_operations structure.\n", __FUNCTION__); 85 "security_operations structure.\n", __func__);
86 return -EINVAL; 86 return -EINVAL;
87 } 87 }
88 88
@@ -110,13 +110,13 @@ int mod_reg_security(const char *name, struct security_operations *ops)
110{ 110{
111 if (verify(ops)) { 111 if (verify(ops)) {
112 printk(KERN_INFO "%s could not verify " 112 printk(KERN_INFO "%s could not verify "
113 "security operations.\n", __FUNCTION__); 113 "security operations.\n", __func__);
114 return -EINVAL; 114 return -EINVAL;
115 } 115 }
116 116
117 if (ops == security_ops) { 117 if (ops == security_ops) {
118 printk(KERN_INFO "%s security operations " 118 printk(KERN_INFO "%s security operations "
119 "already registered.\n", __FUNCTION__); 119 "already registered.\n", __func__);
120 return -EINVAL; 120 return -EINVAL;
121 } 121 }
122 122
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index 2b517d618672..a436d1cfa88b 100644
--- a/security/selinux/Kconfig
+++ b/security/selinux/Kconfig
@@ -145,7 +145,7 @@ config SECURITY_SELINUX_POLICYDB_VERSION_MAX
145config SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE 145config SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
146 int "NSA SELinux maximum supported policy format version value" 146 int "NSA SELinux maximum supported policy format version value"
147 depends on SECURITY_SELINUX_POLICYDB_VERSION_MAX 147 depends on SECURITY_SELINUX_POLICYDB_VERSION_MAX
148 range 15 22 148 range 15 23
149 default 19 149 default 19
150 help 150 help
151 This option sets the value for the maximum policy format version 151 This option sets the value for the maximum policy format version
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index 00afd85f1edb..d47fc5e545e0 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -11,6 +11,7 @@ selinux-y := avc.o \
11 nlmsgtab.o \ 11 nlmsgtab.o \
12 netif.o \ 12 netif.o \
13 netnode.o \ 13 netnode.o \
14 netport.o \
14 exports.o 15 exports.o
15 16
16selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o 17selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 187964e88af1..a4fc6e6d038a 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -871,6 +871,8 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
871 int rc = 0; 871 int rc = 0;
872 u32 denied; 872 u32 denied;
873 873
874 BUG_ON(!requested);
875
874 rcu_read_lock(); 876 rcu_read_lock();
875 877
876 node = avc_lookup(ssid, tsid, tclass, requested); 878 node = avc_lookup(ssid, tsid, tclass, requested);
@@ -890,13 +892,14 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
890 892
891 denied = requested & ~(p_ae->avd.allowed); 893 denied = requested & ~(p_ae->avd.allowed);
892 894
893 if (!requested || denied) { 895 if (denied) {
894 if (selinux_enforcing || (flags & AVC_STRICT)) 896 if (flags & AVC_STRICT)
895 rc = -EACCES; 897 rc = -EACCES;
898 else if (!selinux_enforcing || security_permissive_sid(ssid))
899 avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
900 tsid, tclass);
896 else 901 else
897 if (node) 902 rc = -EACCES;
898 avc_update_node(AVC_CALLBACK_GRANT,requested,
899 ssid,tsid,tclass);
900 } 903 }
901 904
902 rcu_read_unlock(); 905 rcu_read_unlock();
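The rewritten denial branch depends on security_permissive_sid(), whose declaration appears in the security.h hunk later in this patch. A plausible ss/services.c implementation is sketched here under the assumption that the policydb gains a per-type permissive bitmap; the permissive_map name and the locking macros are guesses following existing services.c conventions:

	/* Sketch only: assumed implementation in ss/services.c. */
	int security_permissive_sid(u32 sid)
	{
		struct context *context;
		u32 type;
		int rc;

		POLICY_RDLOCK;
		context = sidtab_search(&sidtab, sid);
		BUG_ON(!context);
		type = context->type;
		/* one bit per type; a set bit marks the domain permissive */
		rc = ebitmap_get_bit(&policydb.permissive_map, type);
		POLICY_RDUNLOCK;

		return rc;
	}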
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index d39b59cf8a08..34f2d46c7984 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -80,6 +80,7 @@
80#include "objsec.h" 80#include "objsec.h"
81#include "netif.h" 81#include "netif.h"
82#include "netnode.h" 82#include "netnode.h"
83#include "netport.h"
83#include "xfrm.h" 84#include "xfrm.h"
84#include "netlabel.h" 85#include "netlabel.h"
85 86
@@ -161,8 +162,7 @@ static int task_alloc_security(struct task_struct *task)
161 if (!tsec) 162 if (!tsec)
162 return -ENOMEM; 163 return -ENOMEM;
163 164
164 tsec->task = task; 165 tsec->osid = tsec->sid = SECINITSID_UNLABELED;
165 tsec->osid = tsec->sid = tsec->ptrace_sid = SECINITSID_UNLABELED;
166 task->security = tsec; 166 task->security = tsec;
167 167
168 return 0; 168 return 0;
@@ -218,7 +218,6 @@ static int file_alloc_security(struct file *file)
218 if (!fsec) 218 if (!fsec)
219 return -ENOMEM; 219 return -ENOMEM;
220 220
221 fsec->file = file;
222 fsec->sid = tsec->sid; 221 fsec->sid = tsec->sid;
223 fsec->fown_sid = tsec->sid; 222 fsec->fown_sid = tsec->sid;
224 file->f_security = fsec; 223 file->f_security = fsec;
@@ -275,12 +274,11 @@ static int sk_alloc_security(struct sock *sk, int family, gfp_t priority)
275 if (!ssec) 274 if (!ssec)
276 return -ENOMEM; 275 return -ENOMEM;
277 276
278 ssec->sk = sk;
279 ssec->peer_sid = SECINITSID_UNLABELED; 277 ssec->peer_sid = SECINITSID_UNLABELED;
280 ssec->sid = SECINITSID_UNLABELED; 278 ssec->sid = SECINITSID_UNLABELED;
281 sk->sk_security = ssec; 279 sk->sk_security = ssec;
282 280
283 selinux_netlbl_sk_security_init(ssec, family); 281 selinux_netlbl_sk_security_reset(ssec, family);
284 282
285 return 0; 283 return 0;
286} 284}
@@ -324,10 +322,10 @@ enum {
324}; 322};
325 323
326static match_table_t tokens = { 324static match_table_t tokens = {
327 {Opt_context, "context=%s"}, 325 {Opt_context, CONTEXT_STR "%s"},
328 {Opt_fscontext, "fscontext=%s"}, 326 {Opt_fscontext, FSCONTEXT_STR "%s"},
329 {Opt_defcontext, "defcontext=%s"}, 327 {Opt_defcontext, DEFCONTEXT_STR "%s"},
330 {Opt_rootcontext, "rootcontext=%s"}, 328 {Opt_rootcontext, ROOTCONTEXT_STR "%s"},
331 {Opt_error, NULL}, 329 {Opt_error, NULL},
332}; 330};
333 331
@@ -671,7 +669,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
671 rc = security_fs_use(sb->s_type->name, &sbsec->behavior, &sbsec->sid); 669 rc = security_fs_use(sb->s_type->name, &sbsec->behavior, &sbsec->sid);
672 if (rc) { 670 if (rc) {
673 printk(KERN_WARNING "%s: security_fs_use(%s) returned %d\n", 671 printk(KERN_WARNING "%s: security_fs_use(%s) returned %d\n",
674 __FUNCTION__, sb->s_type->name, rc); 672 __func__, sb->s_type->name, rc);
675 goto out; 673 goto out;
676 } 674 }
677 675
@@ -1137,7 +1135,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
1137 } 1135 }
1138 if (!dentry) { 1136 if (!dentry) {
1139 printk(KERN_WARNING "%s: no dentry for dev=%s " 1137 printk(KERN_WARNING "%s: no dentry for dev=%s "
1140 "ino=%ld\n", __FUNCTION__, inode->i_sb->s_id, 1138 "ino=%ld\n", __func__, inode->i_sb->s_id,
1141 inode->i_ino); 1139 inode->i_ino);
1142 goto out_unlock; 1140 goto out_unlock;
1143 } 1141 }
@@ -1175,7 +1173,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
1175 if (rc < 0) { 1173 if (rc < 0) {
1176 if (rc != -ENODATA) { 1174 if (rc != -ENODATA) {
1177 printk(KERN_WARNING "%s: getxattr returned " 1175 printk(KERN_WARNING "%s: getxattr returned "
1178 "%d for dev=%s ino=%ld\n", __FUNCTION__, 1176 "%d for dev=%s ino=%ld\n", __func__,
1179 -rc, inode->i_sb->s_id, inode->i_ino); 1177 -rc, inode->i_sb->s_id, inode->i_ino);
1180 kfree(context); 1178 kfree(context);
1181 goto out_unlock; 1179 goto out_unlock;
@@ -1190,7 +1188,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
1190 if (rc) { 1188 if (rc) {
1191 printk(KERN_WARNING "%s: context_to_sid(%s) " 1189 printk(KERN_WARNING "%s: context_to_sid(%s) "
1192 "returned %d for dev=%s ino=%ld\n", 1190 "returned %d for dev=%s ino=%ld\n",
1193 __FUNCTION__, context, -rc, 1191 __func__, context, -rc,
1194 inode->i_sb->s_id, inode->i_ino); 1192 inode->i_sb->s_id, inode->i_ino);
1195 kfree(context); 1193 kfree(context);
1196 /* Leave with the unlabeled SID */ 1194 /* Leave with the unlabeled SID */
@@ -1618,6 +1616,35 @@ static inline u32 file_mask_to_av(int mode, int mask)
1618 return av; 1616 return av;
1619} 1617}
1620 1618
1619/*
 1620 * Convert a file mask to an access vector and include the correct
 1621 * open permission.
1622 */
1623static inline u32 open_file_mask_to_av(int mode, int mask)
1624{
1625 u32 av = file_mask_to_av(mode, mask);
1626
1627 if (selinux_policycap_openperm) {
1628 /*
1629 * lnk files and socks do not really have an 'open'
1630 */
1631 if (S_ISREG(mode))
1632 av |= FILE__OPEN;
1633 else if (S_ISCHR(mode))
1634 av |= CHR_FILE__OPEN;
1635 else if (S_ISBLK(mode))
1636 av |= BLK_FILE__OPEN;
1637 else if (S_ISFIFO(mode))
1638 av |= FIFO_FILE__OPEN;
1639 else if (S_ISDIR(mode))
1640 av |= DIR__OPEN;
1641 else
 1642			printk(KERN_ERR "SELinux: WARNING: inside open_file_mask_to_av "
1643 "with unknown mode:%x\n", mode);
1644 }
1645 return av;
1646}
1647
1621/* Convert a Linux file to an access vector. */ 1648/* Convert a Linux file to an access vector. */
1622static inline u32 file_to_av(struct file *file) 1649static inline u32 file_to_av(struct file *file)
1623{ 1650{
@@ -1645,19 +1672,13 @@ static inline u32 file_to_av(struct file *file)
1645 1672
1646static int selinux_ptrace(struct task_struct *parent, struct task_struct *child) 1673static int selinux_ptrace(struct task_struct *parent, struct task_struct *child)
1647{ 1674{
1648 struct task_security_struct *psec = parent->security;
1649 struct task_security_struct *csec = child->security;
1650 int rc; 1675 int rc;
1651 1676
1652 rc = secondary_ops->ptrace(parent,child); 1677 rc = secondary_ops->ptrace(parent,child);
1653 if (rc) 1678 if (rc)
1654 return rc; 1679 return rc;
1655 1680
1656 rc = task_has_perm(parent, child, PROCESS__PTRACE); 1681 return task_has_perm(parent, child, PROCESS__PTRACE);
1657 /* Save the SID of the tracing process for later use in apply_creds. */
1658 if (!(child->ptrace & PT_PTRACED) && !rc)
1659 csec->ptrace_sid = psec->sid;
1660 return rc;
1661} 1682}
1662 1683
1663static int selinux_capget(struct task_struct *target, kernel_cap_t *effective, 1684static int selinux_capget(struct task_struct *target, kernel_cap_t *effective,
@@ -1879,6 +1900,22 @@ static int selinux_vm_enough_memory(struct mm_struct *mm, long pages)
1879 return __vm_enough_memory(mm, pages, cap_sys_admin); 1900 return __vm_enough_memory(mm, pages, cap_sys_admin);
1880} 1901}
1881 1902
1903/**
1904 * task_tracer_task - return the task that is tracing the given task
1905 * @task: task to consider
1906 *
 1907 * Returns NULL if no one is tracing @task, or the &struct task_struct
1908 * pointer to its tracer.
1909 *
1910 * Must be called under rcu_read_lock().
1911 */
1912static struct task_struct *task_tracer_task(struct task_struct *task)
1913{
1914 if (task->ptrace & PT_PTRACED)
1915 return rcu_dereference(task->parent);
1916 return NULL;
1917}
1918
1882/* binprm security operations */ 1919/* binprm security operations */
1883 1920
1884static int selinux_bprm_alloc_security(struct linux_binprm *bprm) 1921static int selinux_bprm_alloc_security(struct linux_binprm *bprm)
@@ -1889,7 +1926,6 @@ static int selinux_bprm_alloc_security(struct linux_binprm *bprm)
1889 if (!bsec) 1926 if (!bsec)
1890 return -ENOMEM; 1927 return -ENOMEM;
1891 1928
1892 bsec->bprm = bprm;
1893 bsec->sid = SECINITSID_UNLABELED; 1929 bsec->sid = SECINITSID_UNLABELED;
1894 bsec->set = 0; 1930 bsec->set = 0;
1895 1931
@@ -2126,12 +2162,25 @@ static void selinux_bprm_apply_creds(struct linux_binprm *bprm, int unsafe)
2126 /* Check for ptracing, and update the task SID if ok. 2162 /* Check for ptracing, and update the task SID if ok.
2127 Otherwise, leave SID unchanged and kill. */ 2163 Otherwise, leave SID unchanged and kill. */
2128 if (unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) { 2164 if (unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
2129 rc = avc_has_perm(tsec->ptrace_sid, sid, 2165 struct task_struct *tracer;
2130 SECCLASS_PROCESS, PROCESS__PTRACE, 2166 struct task_security_struct *sec;
2131 NULL); 2167 u32 ptsid = 0;
2132 if (rc) { 2168
2133 bsec->unsafe = 1; 2169 rcu_read_lock();
2134 return; 2170 tracer = task_tracer_task(current);
2171 if (likely(tracer != NULL)) {
2172 sec = tracer->security;
2173 ptsid = sec->sid;
2174 }
2175 rcu_read_unlock();
2176
2177 if (ptsid != 0) {
2178 rc = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
2179 PROCESS__PTRACE, NULL);
2180 if (rc) {
2181 bsec->unsafe = 1;
2182 return;
2183 }
2135 } 2184 }
2136 } 2185 }
2137 tsec->sid = sid; 2186 tsec->sid = sid;
@@ -2239,10 +2288,10 @@ static inline int match_prefix(char *prefix, int plen, char *option, int olen)
2239 2288
2240static inline int selinux_option(char *option, int len) 2289static inline int selinux_option(char *option, int len)
2241{ 2290{
2242 return (match_prefix("context=", sizeof("context=")-1, option, len) || 2291 return (match_prefix(CONTEXT_STR, sizeof(CONTEXT_STR)-1, option, len) ||
2243 match_prefix("fscontext=", sizeof("fscontext=")-1, option, len) || 2292 match_prefix(FSCONTEXT_STR, sizeof(FSCONTEXT_STR)-1, option, len) ||
2244 match_prefix("defcontext=", sizeof("defcontext=")-1, option, len) || 2293 match_prefix(DEFCONTEXT_STR, sizeof(DEFCONTEXT_STR)-1, option, len) ||
2245 match_prefix("rootcontext=", sizeof("rootcontext=")-1, option, len)); 2294 match_prefix(ROOTCONTEXT_STR, sizeof(ROOTCONTEXT_STR)-1, option, len));
2246} 2295}
2247 2296
2248static inline void take_option(char **to, char *from, int *first, int len) 2297static inline void take_option(char **to, char *from, int *first, int len)
@@ -2412,7 +2461,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
2412 printk(KERN_WARNING "%s: " 2461 printk(KERN_WARNING "%s: "
2413 "security_transition_sid failed, rc=%d (dev=%s " 2462 "security_transition_sid failed, rc=%d (dev=%s "
2414 "ino=%ld)\n", 2463 "ino=%ld)\n",
2415 __FUNCTION__, 2464 __func__,
2416 -rc, inode->i_sb->s_id, inode->i_ino); 2465 -rc, inode->i_sb->s_id, inode->i_ino);
2417 return rc; 2466 return rc;
2418 } 2467 }
@@ -2536,7 +2585,7 @@ static int selinux_inode_permission(struct inode *inode, int mask,
2536 } 2585 }
2537 2586
2538 return inode_has_perm(current, inode, 2587 return inode_has_perm(current, inode,
2539 file_mask_to_av(inode->i_mode, mask), NULL); 2588 open_file_mask_to_av(inode->i_mode, mask), NULL);
2540} 2589}
2541 2590
2542static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr) 2591static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
@@ -2646,7 +2695,7 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, char *name,
2646 rc = security_context_to_sid(value, size, &newsid); 2695 rc = security_context_to_sid(value, size, &newsid);
2647 if (rc) { 2696 if (rc) {
2648 printk(KERN_WARNING "%s: unable to obtain SID for context " 2697 printk(KERN_WARNING "%s: unable to obtain SID for context "
2649 "%s, rc=%d\n", __FUNCTION__, (char*)value, -rc); 2698 "%s, rc=%d\n", __func__, (char *)value, -rc);
2650 return; 2699 return;
2651 } 2700 }
2652 2701
@@ -3087,11 +3136,6 @@ static int selinux_task_alloc_security(struct task_struct *tsk)
3087 tsec2->keycreate_sid = tsec1->keycreate_sid; 3136 tsec2->keycreate_sid = tsec1->keycreate_sid;
3088 tsec2->sockcreate_sid = tsec1->sockcreate_sid; 3137 tsec2->sockcreate_sid = tsec1->sockcreate_sid;
3089 3138
3090 /* Retain ptracer SID across fork, if any.
3091 This will be reset by the ptrace hook upon any
3092 subsequent ptrace_attach operations. */
3093 tsec2->ptrace_sid = tsec1->ptrace_sid;
3094
3095 return 0; 3139 return 0;
3096} 3140}
3097 3141
@@ -3627,10 +3671,8 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
3627 inet_get_local_port_range(&low, &high); 3671 inet_get_local_port_range(&low, &high);
3628 3672
3629 if (snum < max(PROT_SOCK, low) || snum > high) { 3673 if (snum < max(PROT_SOCK, low) || snum > high) {
3630 err = security_port_sid(sk->sk_family, 3674 err = sel_netport_sid(sk->sk_protocol,
3631 sk->sk_type, 3675 snum, &sid);
3632 sk->sk_protocol, snum,
3633 &sid);
3634 if (err) 3676 if (err)
3635 goto out; 3677 goto out;
3636 AVC_AUDIT_DATA_INIT(&ad,NET); 3678 AVC_AUDIT_DATA_INIT(&ad,NET);
@@ -3718,8 +3760,7 @@ static int selinux_socket_connect(struct socket *sock, struct sockaddr *address,
3718 snum = ntohs(addr6->sin6_port); 3760 snum = ntohs(addr6->sin6_port);
3719 } 3761 }
3720 3762
3721 err = security_port_sid(sk->sk_family, sk->sk_type, 3763 err = sel_netport_sid(sk->sk_protocol, snum, &sid);
3722 sk->sk_protocol, snum, &sid);
3723 if (err) 3764 if (err)
3724 goto out; 3765 goto out;
3725 3766
@@ -3950,9 +3991,8 @@ static int selinux_sock_rcv_skb_iptables_compat(struct sock *sk,
3950 3991
3951 if (!recv_perm) 3992 if (!recv_perm)
3952 return 0; 3993 return 0;
3953 err = security_port_sid(sk->sk_family, sk->sk_type, 3994 err = sel_netport_sid(sk->sk_protocol,
3954 sk->sk_protocol, ntohs(ad->u.net.sport), 3995 ntohs(ad->u.net.sport), &port_sid);
3955 &port_sid);
3956 if (unlikely(err)) { 3996 if (unlikely(err)) {
3957 printk(KERN_WARNING 3997 printk(KERN_WARNING
3958 "SELinux: failure in" 3998 "SELinux: failure in"
@@ -4139,7 +4179,7 @@ static void selinux_sk_clone_security(const struct sock *sk, struct sock *newsk)
4139 newssec->peer_sid = ssec->peer_sid; 4179 newssec->peer_sid = ssec->peer_sid;
4140 newssec->sclass = ssec->sclass; 4180 newssec->sclass = ssec->sclass;
4141 4181
4142 selinux_netlbl_sk_security_clone(ssec, newssec); 4182 selinux_netlbl_sk_security_reset(newssec, newsk->sk_family);
4143} 4183}
4144 4184
4145static void selinux_sk_getsecid(struct sock *sk, u32 *secid) 4185static void selinux_sk_getsecid(struct sock *sk, u32 *secid)
@@ -4373,9 +4413,8 @@ static int selinux_ip_postroute_iptables_compat(struct sock *sk,
4373 if (send_perm != 0) 4413 if (send_perm != 0)
4374 return 0; 4414 return 0;
4375 4415
4376 err = security_port_sid(sk->sk_family, sk->sk_type, 4416 err = sel_netport_sid(sk->sk_protocol,
4377 sk->sk_protocol, ntohs(ad->u.net.dport), 4417 ntohs(ad->u.net.dport), &port_sid);
4378 &port_sid);
4379 if (unlikely(err)) { 4418 if (unlikely(err)) {
4380 printk(KERN_WARNING 4419 printk(KERN_WARNING
4381 "SELinux: failure in" 4420 "SELinux: failure in"
@@ -4561,7 +4600,6 @@ static int ipc_alloc_security(struct task_struct *task,
4561 return -ENOMEM; 4600 return -ENOMEM;
4562 4601
4563 isec->sclass = sclass; 4602 isec->sclass = sclass;
4564 isec->ipc_perm = perm;
4565 isec->sid = tsec->sid; 4603 isec->sid = tsec->sid;
4566 perm->security = isec; 4604 perm->security = isec;
4567 4605
@@ -4583,7 +4621,6 @@ static int msg_msg_alloc_security(struct msg_msg *msg)
4583 if (!msec) 4621 if (!msec)
4584 return -ENOMEM; 4622 return -ENOMEM;
4585 4623
4586 msec->msg = msg;
4587 msec->sid = SECINITSID_UNLABELED; 4624 msec->sid = SECINITSID_UNLABELED;
4588 msg->security = msec; 4625 msg->security = msec;
4589 4626
@@ -4994,14 +5031,14 @@ static int selinux_register_security (const char *name, struct security_operatio
4994{ 5031{
4995 if (secondary_ops != original_ops) { 5032 if (secondary_ops != original_ops) {
4996 printk(KERN_ERR "%s: There is already a secondary security " 5033 printk(KERN_ERR "%s: There is already a secondary security "
4997 "module registered.\n", __FUNCTION__); 5034 "module registered.\n", __func__);
4998 return -EINVAL; 5035 return -EINVAL;
4999 } 5036 }
5000 5037
5001 secondary_ops = ops; 5038 secondary_ops = ops;
5002 5039
5003 printk(KERN_INFO "%s: Registering secondary module %s\n", 5040 printk(KERN_INFO "%s: Registering secondary module %s\n",
5004 __FUNCTION__, 5041 __func__,
5005 name); 5042 name);
5006 5043
5007 return 0; 5044 return 0;
@@ -5057,6 +5094,7 @@ static int selinux_setprocattr(struct task_struct *p,
5057 char *name, void *value, size_t size) 5094 char *name, void *value, size_t size)
5058{ 5095{
5059 struct task_security_struct *tsec; 5096 struct task_security_struct *tsec;
5097 struct task_struct *tracer;
5060 u32 sid = 0; 5098 u32 sid = 0;
5061 int error; 5099 int error;
5062 char *str = value; 5100 char *str = value;
@@ -5145,18 +5183,24 @@ static int selinux_setprocattr(struct task_struct *p,
5145 /* Check for ptracing, and update the task SID if ok. 5183 /* Check for ptracing, and update the task SID if ok.
5146 Otherwise, leave SID unchanged and fail. */ 5184 Otherwise, leave SID unchanged and fail. */
5147 task_lock(p); 5185 task_lock(p);
5148 if (p->ptrace & PT_PTRACED) { 5186 rcu_read_lock();
5149 error = avc_has_perm_noaudit(tsec->ptrace_sid, sid, 5187 tracer = task_tracer_task(p);
5188 if (tracer != NULL) {
5189 struct task_security_struct *ptsec = tracer->security;
5190 u32 ptsid = ptsec->sid;
5191 rcu_read_unlock();
5192 error = avc_has_perm_noaudit(ptsid, sid,
5150 SECCLASS_PROCESS, 5193 SECCLASS_PROCESS,
5151 PROCESS__PTRACE, 0, &avd); 5194 PROCESS__PTRACE, 0, &avd);
5152 if (!error) 5195 if (!error)
5153 tsec->sid = sid; 5196 tsec->sid = sid;
5154 task_unlock(p); 5197 task_unlock(p);
5155 avc_audit(tsec->ptrace_sid, sid, SECCLASS_PROCESS, 5198 avc_audit(ptsid, sid, SECCLASS_PROCESS,
5156 PROCESS__PTRACE, &avd, error, NULL); 5199 PROCESS__PTRACE, &avd, error, NULL);
5157 if (error) 5200 if (error)
5158 return error; 5201 return error;
5159 } else { 5202 } else {
5203 rcu_read_unlock();
5160 tsec->sid = sid; 5204 tsec->sid = sid;
5161 task_unlock(p); 5205 task_unlock(p);
5162 } 5206 }
@@ -5194,7 +5238,6 @@ static int selinux_key_alloc(struct key *k, struct task_struct *tsk,
5194 if (!ksec) 5238 if (!ksec)
5195 return -ENOMEM; 5239 return -ENOMEM;
5196 5240
5197 ksec->obj = k;
5198 if (tsec->keycreate_sid) 5241 if (tsec->keycreate_sid)
5199 ksec->sid = tsec->keycreate_sid; 5242 ksec->sid = tsec->keycreate_sid;
5200 else 5243 else
@@ -5631,5 +5674,3 @@ int selinux_disable(void)
5631 return 0; 5674 return 0;
5632} 5675}
5633#endif 5676#endif
5634
5635
diff --git a/security/selinux/include/av_perm_to_string.h b/security/selinux/include/av_perm_to_string.h
index d5696690d3a2..1223b4ff9bee 100644
--- a/security/selinux/include/av_perm_to_string.h
+++ b/security/selinux/include/av_perm_to_string.h
@@ -14,12 +14,17 @@
14 S_(SECCLASS_DIR, DIR__REPARENT, "reparent") 14 S_(SECCLASS_DIR, DIR__REPARENT, "reparent")
15 S_(SECCLASS_DIR, DIR__SEARCH, "search") 15 S_(SECCLASS_DIR, DIR__SEARCH, "search")
16 S_(SECCLASS_DIR, DIR__RMDIR, "rmdir") 16 S_(SECCLASS_DIR, DIR__RMDIR, "rmdir")
17 S_(SECCLASS_DIR, DIR__OPEN, "open")
17 S_(SECCLASS_FILE, FILE__EXECUTE_NO_TRANS, "execute_no_trans") 18 S_(SECCLASS_FILE, FILE__EXECUTE_NO_TRANS, "execute_no_trans")
18 S_(SECCLASS_FILE, FILE__ENTRYPOINT, "entrypoint") 19 S_(SECCLASS_FILE, FILE__ENTRYPOINT, "entrypoint")
19 S_(SECCLASS_FILE, FILE__EXECMOD, "execmod") 20 S_(SECCLASS_FILE, FILE__EXECMOD, "execmod")
21 S_(SECCLASS_FILE, FILE__OPEN, "open")
20 S_(SECCLASS_CHR_FILE, CHR_FILE__EXECUTE_NO_TRANS, "execute_no_trans") 22 S_(SECCLASS_CHR_FILE, CHR_FILE__EXECUTE_NO_TRANS, "execute_no_trans")
21 S_(SECCLASS_CHR_FILE, CHR_FILE__ENTRYPOINT, "entrypoint") 23 S_(SECCLASS_CHR_FILE, CHR_FILE__ENTRYPOINT, "entrypoint")
22 S_(SECCLASS_CHR_FILE, CHR_FILE__EXECMOD, "execmod") 24 S_(SECCLASS_CHR_FILE, CHR_FILE__EXECMOD, "execmod")
25 S_(SECCLASS_CHR_FILE, CHR_FILE__OPEN, "open")
26 S_(SECCLASS_BLK_FILE, BLK_FILE__OPEN, "open")
27 S_(SECCLASS_FIFO_FILE, FIFO_FILE__OPEN, "open")
23 S_(SECCLASS_FD, FD__USE, "use") 28 S_(SECCLASS_FD, FD__USE, "use")
24 S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__CONNECTTO, "connectto") 29 S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__CONNECTTO, "connectto")
25 S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__NEWCONN, "newconn") 30 S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__NEWCONN, "newconn")
diff --git a/security/selinux/include/av_permissions.h b/security/selinux/include/av_permissions.h
index 75b41311ab86..c4c51165c505 100644
--- a/security/selinux/include/av_permissions.h
+++ b/security/selinux/include/av_permissions.h
@@ -79,6 +79,7 @@
79#define DIR__REPARENT 0x00080000UL 79#define DIR__REPARENT 0x00080000UL
80#define DIR__SEARCH 0x00100000UL 80#define DIR__SEARCH 0x00100000UL
81#define DIR__RMDIR 0x00200000UL 81#define DIR__RMDIR 0x00200000UL
82#define DIR__OPEN 0x00400000UL
82#define FILE__IOCTL 0x00000001UL 83#define FILE__IOCTL 0x00000001UL
83#define FILE__READ 0x00000002UL 84#define FILE__READ 0x00000002UL
84#define FILE__WRITE 0x00000004UL 85#define FILE__WRITE 0x00000004UL
@@ -99,6 +100,7 @@
99#define FILE__EXECUTE_NO_TRANS 0x00020000UL 100#define FILE__EXECUTE_NO_TRANS 0x00020000UL
100#define FILE__ENTRYPOINT 0x00040000UL 101#define FILE__ENTRYPOINT 0x00040000UL
101#define FILE__EXECMOD 0x00080000UL 102#define FILE__EXECMOD 0x00080000UL
103#define FILE__OPEN 0x00100000UL
102#define LNK_FILE__IOCTL 0x00000001UL 104#define LNK_FILE__IOCTL 0x00000001UL
103#define LNK_FILE__READ 0x00000002UL 105#define LNK_FILE__READ 0x00000002UL
104#define LNK_FILE__WRITE 0x00000004UL 106#define LNK_FILE__WRITE 0x00000004UL
@@ -136,6 +138,7 @@
136#define CHR_FILE__EXECUTE_NO_TRANS 0x00020000UL 138#define CHR_FILE__EXECUTE_NO_TRANS 0x00020000UL
137#define CHR_FILE__ENTRYPOINT 0x00040000UL 139#define CHR_FILE__ENTRYPOINT 0x00040000UL
138#define CHR_FILE__EXECMOD 0x00080000UL 140#define CHR_FILE__EXECMOD 0x00080000UL
141#define CHR_FILE__OPEN 0x00100000UL
139#define BLK_FILE__IOCTL 0x00000001UL 142#define BLK_FILE__IOCTL 0x00000001UL
140#define BLK_FILE__READ 0x00000002UL 143#define BLK_FILE__READ 0x00000002UL
141#define BLK_FILE__WRITE 0x00000004UL 144#define BLK_FILE__WRITE 0x00000004UL
@@ -153,6 +156,7 @@
153#define BLK_FILE__SWAPON 0x00004000UL 156#define BLK_FILE__SWAPON 0x00004000UL
154#define BLK_FILE__QUOTAON 0x00008000UL 157#define BLK_FILE__QUOTAON 0x00008000UL
155#define BLK_FILE__MOUNTON 0x00010000UL 158#define BLK_FILE__MOUNTON 0x00010000UL
159#define BLK_FILE__OPEN 0x00020000UL
156#define SOCK_FILE__IOCTL 0x00000001UL 160#define SOCK_FILE__IOCTL 0x00000001UL
157#define SOCK_FILE__READ 0x00000002UL 161#define SOCK_FILE__READ 0x00000002UL
158#define SOCK_FILE__WRITE 0x00000004UL 162#define SOCK_FILE__WRITE 0x00000004UL
@@ -187,6 +191,7 @@
187#define FIFO_FILE__SWAPON 0x00004000UL 191#define FIFO_FILE__SWAPON 0x00004000UL
188#define FIFO_FILE__QUOTAON 0x00008000UL 192#define FIFO_FILE__QUOTAON 0x00008000UL
189#define FIFO_FILE__MOUNTON 0x00010000UL 193#define FIFO_FILE__MOUNTON 0x00010000UL
194#define FIFO_FILE__OPEN 0x00020000UL
190#define FD__USE 0x00000001UL 195#define FD__USE 0x00000001UL
191#define SOCKET__IOCTL 0x00000001UL 196#define SOCKET__IOCTL 0x00000001UL
192#define SOCKET__READ 0x00000002UL 197#define SOCKET__READ 0x00000002UL
diff --git a/security/selinux/include/netlabel.h b/security/selinux/include/netlabel.h
index 00a2809c8506..9a9e7cd9a379 100644
--- a/security/selinux/include/netlabel.h
+++ b/security/selinux/include/netlabel.h
@@ -41,10 +41,6 @@ void selinux_netlbl_cache_invalidate(void);
41 41
42void selinux_netlbl_sk_security_reset(struct sk_security_struct *ssec, 42void selinux_netlbl_sk_security_reset(struct sk_security_struct *ssec,
43 int family); 43 int family);
44void selinux_netlbl_sk_security_init(struct sk_security_struct *ssec,
45 int family);
46void selinux_netlbl_sk_security_clone(struct sk_security_struct *ssec,
47 struct sk_security_struct *newssec);
48 44
49int selinux_netlbl_skbuff_getsid(struct sk_buff *skb, 45int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
50 u16 family, 46 u16 family,
@@ -73,18 +69,6 @@ static inline void selinux_netlbl_sk_security_reset(
73{ 69{
74 return; 70 return;
75} 71}
76static inline void selinux_netlbl_sk_security_init(
77 struct sk_security_struct *ssec,
78 int family)
79{
80 return;
81}
82static inline void selinux_netlbl_sk_security_clone(
83 struct sk_security_struct *ssec,
84 struct sk_security_struct *newssec)
85{
86 return;
87}
88 72
89static inline int selinux_netlbl_skbuff_getsid(struct sk_buff *skb, 73static inline int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
90 u16 family, 74 u16 family,
diff --git a/security/selinux/include/netport.h b/security/selinux/include/netport.h
new file mode 100644
index 000000000000..8991752eaf93
--- /dev/null
+++ b/security/selinux/include/netport.h
@@ -0,0 +1,31 @@
1/*
2 * Network port table
3 *
4 * SELinux must keep a mapping of network ports to labels/SIDs. This
5 * mapping is maintained as part of the normal policy but a fast cache is
6 * needed to reduce the lookup overhead.
7 *
8 * Author: Paul Moore <paul.moore@hp.com>
9 *
10 */
11
12/*
13 * (c) Copyright Hewlett-Packard Development Company, L.P., 2008
14 *
15 * This program is free software: you can redistribute it and/or modify
16 * it under the terms of version 2 of the GNU General Public License as
17 * published by the Free Software Foundation.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 */
25
26#ifndef _SELINUX_NETPORT_H
27#define _SELINUX_NETPORT_H
28
29int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid);
30
31#endif
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index c6c2bb4ebacc..300b61bad7b3 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -28,14 +28,12 @@
28#include "avc.h" 28#include "avc.h"
29 29
30struct task_security_struct { 30struct task_security_struct {
31 struct task_struct *task; /* back pointer to task object */
32 u32 osid; /* SID prior to last execve */ 31 u32 osid; /* SID prior to last execve */
33 u32 sid; /* current SID */ 32 u32 sid; /* current SID */
34 u32 exec_sid; /* exec SID */ 33 u32 exec_sid; /* exec SID */
35 u32 create_sid; /* fscreate SID */ 34 u32 create_sid; /* fscreate SID */
36 u32 keycreate_sid; /* keycreate SID */ 35 u32 keycreate_sid; /* keycreate SID */
37 u32 sockcreate_sid; /* fscreate SID */ 36 u32 sockcreate_sid; /* fscreate SID */
38 u32 ptrace_sid; /* SID of ptrace parent */
39}; 37};
40 38
41struct inode_security_struct { 39struct inode_security_struct {
@@ -50,7 +48,6 @@ struct inode_security_struct {
50}; 48};
51 49
52struct file_security_struct { 50struct file_security_struct {
53 struct file *file; /* back pointer to file object */
54 u32 sid; /* SID of open file description */ 51 u32 sid; /* SID of open file description */
55 u32 fown_sid; /* SID of file owner (for SIGIO) */ 52 u32 fown_sid; /* SID of file owner (for SIGIO) */
56 u32 isid; /* SID of inode at the time of file open */ 53 u32 isid; /* SID of inode at the time of file open */
@@ -73,18 +70,15 @@ struct superblock_security_struct {
73}; 70};
74 71
75struct msg_security_struct { 72struct msg_security_struct {
76 struct msg_msg *msg; /* back pointer */
77 u32 sid; /* SID of message */ 73 u32 sid; /* SID of message */
78}; 74};
79 75
80struct ipc_security_struct { 76struct ipc_security_struct {
81 struct kern_ipc_perm *ipc_perm; /* back pointer */
82 u16 sclass; /* security class of this object */ 77 u16 sclass; /* security class of this object */
83 u32 sid; /* SID of IPC resource */ 78 u32 sid; /* SID of IPC resource */
84}; 79};
85 80
86struct bprm_security_struct { 81struct bprm_security_struct {
87 struct linux_binprm *bprm; /* back pointer to bprm object */
88 u32 sid; /* SID for transformed process */ 82 u32 sid; /* SID for transformed process */
89 unsigned char set; 83 unsigned char set;
90 84
@@ -109,8 +103,13 @@ struct netnode_security_struct {
109 u16 family; /* address family */ 103 u16 family; /* address family */
110}; 104};
111 105
106struct netport_security_struct {
107 u32 sid; /* SID for this node */
108 u16 port; /* port number */
109 u8 protocol; /* transport protocol */
110};
111
112struct sk_security_struct { 112struct sk_security_struct {
113 struct sock *sk; /* back pointer to sk object */
114 u32 sid; /* SID of this object */ 113 u32 sid; /* SID of this object */
115 u32 peer_sid; /* SID of peer */ 114 u32 peer_sid; /* SID of peer */
116 u16 sclass; /* sock security class */ 115 u16 sclass; /* sock security class */
@@ -120,12 +119,10 @@ struct sk_security_struct {
120 NLBL_REQUIRE, 119 NLBL_REQUIRE,
121 NLBL_LABELED, 120 NLBL_LABELED,
122 } nlbl_state; 121 } nlbl_state;
123 spinlock_t nlbl_lock; /* protects nlbl_state */
124#endif 122#endif
125}; 123};
126 124
127struct key_security_struct { 125struct key_security_struct {
128 struct key *obj; /* back pointer */
129 u32 sid; /* SID of key */ 126 u32 sid; /* SID of key */
130}; 127};
131 128
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 44e12ec88090..1904c462a605 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -26,13 +26,14 @@
26#define POLICYDB_VERSION_AVTAB 20 26#define POLICYDB_VERSION_AVTAB 20
27#define POLICYDB_VERSION_RANGETRANS 21 27#define POLICYDB_VERSION_RANGETRANS 21
28#define POLICYDB_VERSION_POLCAP 22 28#define POLICYDB_VERSION_POLCAP 22
29#define POLICYDB_VERSION_PERMISSIVE 23
29 30
30/* Range of policy versions we understand*/ 31/* Range of policy versions we understand*/
31#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE 32#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE
32#ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX 33#ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX
33#define POLICYDB_VERSION_MAX CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE 34#define POLICYDB_VERSION_MAX CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
34#else 35#else
35#define POLICYDB_VERSION_MAX POLICYDB_VERSION_POLCAP 36#define POLICYDB_VERSION_MAX POLICYDB_VERSION_PERMISSIVE
36#endif 37#endif
37 38
38#define CONTEXT_MNT 0x01 39#define CONTEXT_MNT 0x01
@@ -40,6 +41,11 @@
40#define ROOTCONTEXT_MNT 0x04 41#define ROOTCONTEXT_MNT 0x04
41#define DEFCONTEXT_MNT 0x08 42#define DEFCONTEXT_MNT 0x08
42 43
44#define CONTEXT_STR "context="
45#define FSCONTEXT_STR "fscontext="
46#define ROOTCONTEXT_STR "rootcontext="
47#define DEFCONTEXT_STR "defcontext="
48
43struct netlbl_lsm_secattr; 49struct netlbl_lsm_secattr;
44 50
45extern int selinux_enabled; 51extern int selinux_enabled;
@@ -48,11 +54,13 @@ extern int selinux_mls_enabled;
48/* Policy capabilities */ 54/* Policy capabilities */
49enum { 55enum {
50 POLICYDB_CAPABILITY_NETPEER, 56 POLICYDB_CAPABILITY_NETPEER,
57 POLICYDB_CAPABILITY_OPENPERM,
51 __POLICYDB_CAPABILITY_MAX 58 __POLICYDB_CAPABILITY_MAX
52}; 59};
53#define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1) 60#define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1)
54 61
55extern int selinux_policycap_netpeer; 62extern int selinux_policycap_netpeer;
63extern int selinux_policycap_openperm;
56 64
57int security_load_policy(void * data, size_t len); 65int security_load_policy(void * data, size_t len);
58 66
@@ -67,6 +75,8 @@ struct av_decision {
67 u32 seqno; 75 u32 seqno;
68}; 76};
69 77
78int security_permissive_sid(u32 sid);
79
70int security_compute_av(u32 ssid, u32 tsid, 80int security_compute_av(u32 ssid, u32 tsid,
71 u16 tclass, u32 requested, 81 u16 tclass, u32 requested,
72 struct av_decision *avd); 82 struct av_decision *avd);
@@ -92,8 +102,7 @@ int security_context_to_sid_default(char *scontext, u32 scontext_len,
92int security_get_user_sids(u32 callsid, char *username, 102int security_get_user_sids(u32 callsid, char *username,
93 u32 **sids, u32 *nel); 103 u32 **sids, u32 *nel);
94 104
95int security_port_sid(u16 domain, u16 type, u8 protocol, u16 port, 105int security_port_sid(u8 protocol, u16 port, u32 *out_sid);
96 u32 *out_sid);
97 106
98int security_netif_sid(char *name, u32 *if_sid); 107int security_netif_sid(char *name, u32 *if_sid);
99 108
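The new selinux_policycap_openperm flag follows the pattern of the existing selinux_policycap_netpeer: both are expected to be latched from the policydb's capability bitmap when a policy is loaded, so the hooks can test a plain int instead of querying the policy. A sketch of that load-time step (the policycaps field name is an assumption):

	/* Sketch only: assumed capability latching at policy load. */
	static void security_load_policycaps(void)
	{
		selinux_policycap_netpeer = ebitmap_get_bit(&policydb.policycaps,
						POLICYDB_CAPABILITY_NETPEER);
		selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps,
						POLICYDB_CAPABILITY_OPENPERM);
	}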
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index 0fa2be4149e8..e8ee91ac12ef 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -34,6 +34,7 @@
34 34
35#include "objsec.h" 35#include "objsec.h"
36#include "security.h" 36#include "security.h"
37#include "netlabel.h"
37 38
38/** 39/**
39 * selinux_netlbl_sidlookup_cached - Cache a SID lookup 40 * selinux_netlbl_sidlookup_cached - Cache a SID lookup
@@ -69,9 +70,7 @@ static int selinux_netlbl_sidlookup_cached(struct sk_buff *skb,
69 * 70 *
70 * Description: 71 * Description:
71 * Attempt to label a socket using the NetLabel mechanism using the given 72 * Attempt to label a socket using the NetLabel mechanism using the given
72 * SID. Returns zero values on success, negative values on failure. The 73 * SID. Returns zero values on success, negative values on failure.
73 * caller is responsibile for calling rcu_read_lock() before calling this
74 * this function and rcu_read_unlock() after this function returns.
75 * 74 *
76 */ 75 */
77static int selinux_netlbl_sock_setsid(struct sock *sk, u32 sid) 76static int selinux_netlbl_sock_setsid(struct sock *sk, u32 sid)
@@ -86,11 +85,8 @@ static int selinux_netlbl_sock_setsid(struct sock *sk, u32 sid)
86 if (rc != 0) 85 if (rc != 0)
87 goto sock_setsid_return; 86 goto sock_setsid_return;
88 rc = netlbl_sock_setattr(sk, &secattr); 87 rc = netlbl_sock_setattr(sk, &secattr);
89 if (rc == 0) { 88 if (rc == 0)
90 spin_lock_bh(&sksec->nlbl_lock);
91 sksec->nlbl_state = NLBL_LABELED; 89 sksec->nlbl_state = NLBL_LABELED;
92 spin_unlock_bh(&sksec->nlbl_lock);
93 }
94 90
95sock_setsid_return: 91sock_setsid_return:
96 netlbl_secattr_destroy(&secattr); 92 netlbl_secattr_destroy(&secattr);
@@ -129,45 +125,6 @@ void selinux_netlbl_sk_security_reset(struct sk_security_struct *ssec,
129} 125}
130 126
131/** 127/**
132 * selinux_netlbl_sk_security_init - Setup the NetLabel fields
133 * @ssec: the sk_security_struct
134 * @family: the socket family
135 *
136 * Description:
137 * Called when a new sk_security_struct is allocated to initialize the NetLabel
138 * fields.
139 *
140 */
141void selinux_netlbl_sk_security_init(struct sk_security_struct *ssec,
142 int family)
143{
144 /* No locking needed, we are the only one who has access to ssec */
145 selinux_netlbl_sk_security_reset(ssec, family);
146 spin_lock_init(&ssec->nlbl_lock);
147}
148
149/**
150 * selinux_netlbl_sk_security_clone - Copy the NetLabel fields
151 * @ssec: the original sk_security_struct
152 * @newssec: the cloned sk_security_struct
153 *
154 * Description:
155 * Clone the NetLabel specific sk_security_struct fields from @ssec to
156 * @newssec.
157 *
158 */
159void selinux_netlbl_sk_security_clone(struct sk_security_struct *ssec,
160 struct sk_security_struct *newssec)
161{
162 /* We don't need to take newssec->nlbl_lock because we are the only
163 * thread with access to newssec, but we do need to take the RCU read
164 * lock as other threads could have access to ssec */
165 rcu_read_lock();
166 selinux_netlbl_sk_security_reset(newssec, ssec->sk->sk_family);
167 rcu_read_unlock();
168}
169
170/**
171 * selinux_netlbl_skbuff_getsid - Get the sid of a packet using NetLabel 128 * selinux_netlbl_skbuff_getsid - Get the sid of a packet using NetLabel
172 * @skb: the packet 129 * @skb: the packet
173 * @family: protocol family 130 * @family: protocol family
@@ -221,12 +178,8 @@ void selinux_netlbl_sock_graft(struct sock *sk, struct socket *sock)
221 struct netlbl_lsm_secattr secattr; 178 struct netlbl_lsm_secattr secattr;
222 u32 nlbl_peer_sid; 179 u32 nlbl_peer_sid;
223 180
224 rcu_read_lock(); 181 if (sksec->nlbl_state != NLBL_REQUIRE)
225
226 if (sksec->nlbl_state != NLBL_REQUIRE) {
227 rcu_read_unlock();
228 return; 182 return;
229 }
230 183
231 netlbl_secattr_init(&secattr); 184 netlbl_secattr_init(&secattr);
232 if (netlbl_sock_getattr(sk, &secattr) == 0 && 185 if (netlbl_sock_getattr(sk, &secattr) == 0 &&
@@ -239,8 +192,6 @@ void selinux_netlbl_sock_graft(struct sock *sk, struct socket *sock)
239 * here we will pick up the pieces in later calls to 192 * here we will pick up the pieces in later calls to
240 * selinux_netlbl_inode_permission(). */ 193 * selinux_netlbl_inode_permission(). */
241 selinux_netlbl_sock_setsid(sk, sksec->sid); 194 selinux_netlbl_sock_setsid(sk, sksec->sid);
242
243 rcu_read_unlock();
244} 195}
245 196
246/** 197/**
@@ -254,16 +205,13 @@ void selinux_netlbl_sock_graft(struct sock *sk, struct socket *sock)
254 */ 205 */
255int selinux_netlbl_socket_post_create(struct socket *sock) 206int selinux_netlbl_socket_post_create(struct socket *sock)
256{ 207{
257 int rc = 0;
258 struct sock *sk = sock->sk; 208 struct sock *sk = sock->sk;
259 struct sk_security_struct *sksec = sk->sk_security; 209 struct sk_security_struct *sksec = sk->sk_security;
260 210
261 rcu_read_lock(); 211 if (sksec->nlbl_state != NLBL_REQUIRE)
262 if (sksec->nlbl_state == NLBL_REQUIRE) 212 return 0;
263 rc = selinux_netlbl_sock_setsid(sk, sksec->sid);
264 rcu_read_unlock();
265 213
266 return rc; 214 return selinux_netlbl_sock_setsid(sk, sksec->sid);
267} 215}
268 216
269/** 217/**
@@ -288,21 +236,21 @@ int selinux_netlbl_inode_permission(struct inode *inode, int mask)
288 if (!S_ISSOCK(inode->i_mode) || 236 if (!S_ISSOCK(inode->i_mode) ||
289 ((mask & (MAY_WRITE | MAY_APPEND)) == 0)) 237 ((mask & (MAY_WRITE | MAY_APPEND)) == 0))
290 return 0; 238 return 0;
239
291 sock = SOCKET_I(inode); 240 sock = SOCKET_I(inode);
292 sk = sock->sk; 241 sk = sock->sk;
293 sksec = sk->sk_security; 242 sksec = sk->sk_security;
294 243 if (sksec->nlbl_state != NLBL_REQUIRE)
295 rcu_read_lock();
296 if (sksec->nlbl_state != NLBL_REQUIRE) {
297 rcu_read_unlock();
298 return 0; 244 return 0;
299 } 245
300 local_bh_disable(); 246 local_bh_disable();
301 bh_lock_sock_nested(sk); 247 bh_lock_sock_nested(sk);
302 rc = selinux_netlbl_sock_setsid(sk, sksec->sid); 248 if (likely(sksec->nlbl_state == NLBL_REQUIRE))
249 rc = selinux_netlbl_sock_setsid(sk, sksec->sid);
250 else
251 rc = 0;
303 bh_unlock_sock(sk); 252 bh_unlock_sock(sk);
304 local_bh_enable(); 253 local_bh_enable();
305 rcu_read_unlock();
306 254
307 return rc; 255 return rc;
308} 256}
@@ -385,7 +333,6 @@ int selinux_netlbl_socket_setsockopt(struct socket *sock,
385 struct sk_security_struct *sksec = sk->sk_security; 333 struct sk_security_struct *sksec = sk->sk_security;
386 struct netlbl_lsm_secattr secattr; 334 struct netlbl_lsm_secattr secattr;
387 335
388 rcu_read_lock();
389 if (level == IPPROTO_IP && optname == IP_OPTIONS && 336 if (level == IPPROTO_IP && optname == IP_OPTIONS &&
390 sksec->nlbl_state == NLBL_LABELED) { 337 sksec->nlbl_state == NLBL_LABELED) {
391 netlbl_secattr_init(&secattr); 338 netlbl_secattr_init(&secattr);
@@ -396,7 +343,6 @@ int selinux_netlbl_socket_setsockopt(struct socket *sock,
396 rc = -EACCES; 343 rc = -EACCES;
397 netlbl_secattr_destroy(&secattr); 344 netlbl_secattr_destroy(&secattr);
398 } 345 }
399 rcu_read_unlock();
400 346
401 return rc; 347 return rc;
402} 348}
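With nlbl_lock removed from sk_security_struct, the selinux_netlbl_inode_permission() hunk above serializes the NLBL_REQUIRE to NLBL_LABELED transition on the socket lock instead: an unlocked fast-path test, then a re-check under the lock. The pattern in isolation (names mirror the hunk above):

	if (sksec->nlbl_state != NLBL_REQUIRE)	/* cheap unlocked check */
		return 0;

	local_bh_disable();
	bh_lock_sock_nested(sk);
	if (likely(sksec->nlbl_state == NLBL_REQUIRE))	/* re-check under lock */
		rc = selinux_netlbl_sock_setsid(sk, sksec->sid);
	else
		rc = 0;		/* another CPU already labeled the socket */
	bh_unlock_sock(sk);
	local_bh_enable();

The re-check is what makes dropping the spinlock safe: two writers may pass the unlocked test, but only the first to take the socket lock performs the state transition.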
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
index b59871d74dad..6214a7a73149 100644
--- a/security/selinux/netlink.c
+++ b/security/selinux/netlink.c
@@ -89,7 +89,7 @@ out:
89nlmsg_failure: 89nlmsg_failure:
90 kfree_skb(skb); 90 kfree_skb(skb);
91oom: 91oom:
92 printk(KERN_ERR "SELinux: OOM in %s\n", __FUNCTION__); 92 printk(KERN_ERR "SELinux: OOM in %s\n", __func__);
93 goto out; 93 goto out;
94} 94}
95 95
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
new file mode 100644
index 000000000000..68ede3c498ab
--- /dev/null
+++ b/security/selinux/netport.c
@@ -0,0 +1,286 @@
1/*
2 * Network port table
3 *
4 * SELinux must keep a mapping of network ports to labels/SIDs. This
5 * mapping is maintained as part of the normal policy but a fast cache is
6 * needed to reduce the lookup overhead.
7 *
8 * Author: Paul Moore <paul.moore@hp.com>
9 *
10 * This code is heavily based on the "netif" concept originally developed by
11 * James Morris <jmorris@redhat.com>
12 * (see security/selinux/netif.c for more information)
13 *
14 */
15
16/*
17 * (c) Copyright Hewlett-Packard Development Company, L.P., 2008
18 *
19 * This program is free software: you can redistribute it and/or modify
20 * it under the terms of version 2 of the GNU General Public License as
21 * published by the Free Software Foundation.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 */
29
30#include <linux/types.h>
31#include <linux/rcupdate.h>
32#include <linux/list.h>
33#include <linux/spinlock.h>
34#include <linux/in.h>
35#include <linux/in6.h>
36#include <linux/ip.h>
37#include <linux/ipv6.h>
38#include <net/ip.h>
39#include <net/ipv6.h>
40#include <asm/bug.h>
41
42#include "netport.h"
43#include "objsec.h"
44
45#define SEL_NETPORT_HASH_SIZE 256
46#define SEL_NETPORT_HASH_BKT_LIMIT 16
47
48struct sel_netport_bkt {
49 int size;
50 struct list_head list;
51};
52
53struct sel_netport {
54 struct netport_security_struct psec;
55
56 struct list_head list;
57 struct rcu_head rcu;
58};
59
60/* NOTE: we are using a combined hash table for both IPv4 and IPv6, the reason
61 * for this is that I suspect most users will not make heavy use of both
62 * address families at the same time so one table will usually end up wasted,
63 * if this becomes a problem we can always add a hash table for each address
64 * family later */
65
66static LIST_HEAD(sel_netport_list);
67static DEFINE_SPINLOCK(sel_netport_lock);
68static struct sel_netport_bkt sel_netport_hash[SEL_NETPORT_HASH_SIZE];
69
70/**
71 * sel_netport_free - Frees a port entry
72 * @p: the entry's RCU field
73 *
74 * Description:
75 * This function is designed to be used as a callback to the call_rcu()
76 * function so that memory allocated to a hash table port entry can be
77 * released safely.
78 *
79 */
80static void sel_netport_free(struct rcu_head *p)
81{
82 struct sel_netport *port = container_of(p, struct sel_netport, rcu);
83 kfree(port);
84}
85
86/**
87 * sel_netport_hashfn - Hashing function for the port table
88 * @pnum: port number
89 *
90 * Description:
91 * This is the hashing function for the port table, it returns the bucket
92 * number for the given port.
93 *
94 */
95static unsigned int sel_netport_hashfn(u16 pnum)
96{
97 return (pnum & (SEL_NETPORT_HASH_SIZE - 1));
98}
99
100/**
101 * sel_netport_find - Search for a port record
102 * @protocol: protocol
 103 * @pnum: port number
104 *
105 * Description:
106 * Search the network port table and return the matching record. If an entry
107 * can not be found in the table return NULL.
108 *
109 */
110static struct sel_netport *sel_netport_find(u8 protocol, u16 pnum)
111{
112 unsigned int idx;
113 struct sel_netport *port;
114
115 idx = sel_netport_hashfn(pnum);
116 list_for_each_entry_rcu(port, &sel_netport_hash[idx].list, list)
117 if (port->psec.port == pnum &&
118 port->psec.protocol == protocol)
119 return port;
120
121 return NULL;
122}
123
124/**
125 * sel_netport_insert - Insert a new port into the table
126 * @port: the new port record
127 *
128 * Description:
129 * Add a new port record to the network address hash table. Returns zero on
130 * success, negative values on failure.
131 *
132 */
133static int sel_netport_insert(struct sel_netport *port)
134{
135 unsigned int idx;
136
137 /* we need to impose a limit on the growth of the hash table so check
138 * this bucket to make sure it is within the specified bounds */
139 idx = sel_netport_hashfn(port->psec.port);
140 list_add_rcu(&port->list, &sel_netport_hash[idx].list);
141 if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
142 struct sel_netport *tail;
143 tail = list_entry(port->list.prev, struct sel_netport, list);
144 list_del_rcu(port->list.prev);
145 call_rcu(&tail->rcu, sel_netport_free);
146 } else
147 sel_netport_hash[idx].size++;
148
149 return 0;
150}
151
152/**
 153 * sel_netport_sid_slow - Lookup the SID of a network port using the policy
154 * @protocol: protocol
155 * @pnum: port
156 * @sid: port SID
157 *
158 * Description:
 159 * This function determines the SID of a network port by querying the
 160 * security policy. The result is added to the network port table to speed
 161 * up future queries. Returns zero on success, negative values on failure.
162 *
163 */
164static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid)
165{
166 int ret;
167 struct sel_netport *port;
168 struct sel_netport *new = NULL;
169
170 spin_lock_bh(&sel_netport_lock);
171 port = sel_netport_find(protocol, pnum);
172 if (port != NULL) {
173 *sid = port->psec.sid;
174 ret = 0;
175 goto out;
176 }
177 new = kzalloc(sizeof(*new), GFP_ATOMIC);
178 if (new == NULL) {
179 ret = -ENOMEM;
180 goto out;
181 }
182 ret = security_port_sid(protocol, pnum, &new->psec.sid);
183 if (ret != 0)
184 goto out;
185 new->psec.port = pnum;
186 new->psec.protocol = protocol;
187 ret = sel_netport_insert(new);
188 if (ret != 0)
189 goto out;
190 *sid = new->psec.sid;
191
192out:
193 spin_unlock_bh(&sel_netport_lock);
194 if (unlikely(ret)) {
195 printk(KERN_WARNING
196 "SELinux: failure in sel_netport_sid_slow(),"
197 " unable to determine network port label\n");
198 kfree(new);
199 }
200 return ret;
201}
202
/**
 * sel_netport_sid - Lookup the SID of a network port
 * @protocol: protocol
 * @pnum: port
 * @sid: port SID
 *
 * Description:
 * This function determines the SID of a network port using the fastest method
 * possible. First the port table is queried, but if an entry can't be found
 * then the policy is queried and the result is added to the table to speed up
 * future queries. Returns zero on success, negative values on failure.
 *
 */
int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid)
{
	struct sel_netport *port;

	rcu_read_lock();
	port = sel_netport_find(protocol, pnum);
	if (port != NULL) {
		*sid = port->psec.sid;
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	return sel_netport_sid_slow(protocol, pnum, sid);
}

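The fast path above runs lockless under rcu_read_lock(), while sel_netport_sid_slow() repeats the lookup under the spinlock because another CPU may have inserted the same entry between the two checks. The same check-allocate-recheck shape, sketched in userspace with a mutex standing in for the spinlock (illustrative only; a plain flag is not a substitute for RCU's ordering guarantees):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static int cached_sid;				/* 0 means "not cached yet" */

static int query_policy(void) { return 42; }	/* stand-in for security_port_sid() */

static int port_sid(int *sid)
{
	if (cached_sid) {		/* fast path: no lock taken */
		*sid = cached_sid;
		return 0;
	}

	pthread_mutex_lock(&cache_lock);
	if (!cached_sid)		/* re-check: someone may have won the race */
		cached_sid = query_policy();
	*sid = cached_sid;
	pthread_mutex_unlock(&cache_lock);
	return 0;
}

int main(void)
{
	int sid;
	port_sid(&sid);
	printf("sid = %d\n", sid);
	return 0;
}
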
/**
 * sel_netport_flush - Flush the entire network port table
 *
 * Description:
 * Remove all entries from the network port table.
 *
 */
static void sel_netport_flush(void)
{
	unsigned int idx;
	struct sel_netport *port;

	spin_lock_bh(&sel_netport_lock);
	for (idx = 0; idx < SEL_NETPORT_HASH_SIZE; idx++) {
		list_for_each_entry(port, &sel_netport_hash[idx].list, list) {
			list_del_rcu(&port->list);
			call_rcu(&port->rcu, sel_netport_free);
		}
		sel_netport_hash[idx].size = 0;
	}
	spin_unlock_bh(&sel_netport_lock);
}

static int sel_netport_avc_callback(u32 event, u32 ssid, u32 tsid,
				    u16 class, u32 perms, u32 *retained)
{
	if (event == AVC_CALLBACK_RESET) {
		sel_netport_flush();
		synchronize_net();
	}
	return 0;
}

static __init int sel_netport_init(void)
{
	int iter;
	int ret;

	if (!selinux_enabled)
		return 0;

	for (iter = 0; iter < SEL_NETPORT_HASH_SIZE; iter++) {
		INIT_LIST_HEAD(&sel_netport_hash[iter].list);
		sel_netport_hash[iter].size = 0;
	}

	ret = avc_add_callback(sel_netport_avc_callback, AVC_CALLBACK_RESET,
			       SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
	if (ret != 0)
		panic("avc_add_callback() failed, error %d\n", ret);

	return ret;
}

__initcall(sel_netport_init);
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 0341567665b3..26fabad09769 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -42,7 +42,8 @@
 
 /* Policy capability filenames */
 static char *policycap_names[] = {
-	"network_peer_controls"
+	"network_peer_controls",
+	"open_perms"
 };
 
 unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
@@ -391,7 +392,7 @@ static ssize_t sel_write_context(struct file * file, char *buf, size_t size)
 
 	if (len > SIMPLE_TRANSACTION_LIMIT) {
 		printk(KERN_ERR "%s: context size (%u) exceeds payload "
-		       "max\n", __FUNCTION__, len);
+		       "max\n", __func__, len);
 		length = -ERANGE;
 		goto out;
 	}
@@ -644,7 +645,7 @@ static ssize_t sel_write_create(struct file * file, char *buf, size_t size)
 
 	if (len > SIMPLE_TRANSACTION_LIMIT) {
 		printk(KERN_ERR "%s: context size (%u) exceeds payload "
-		       "max\n", __FUNCTION__, len);
+		       "max\n", __func__, len);
 		length = -ERANGE;
 		goto out3;
 	}
@@ -821,7 +822,7 @@ static ssize_t sel_write_member(struct file * file, char *buf, size_t size)
 
 	if (len > SIMPLE_TRANSACTION_LIMIT) {
 		printk(KERN_ERR "%s: context size (%u) exceeds payload "
-		       "max\n", __FUNCTION__, len);
+		       "max\n", __func__, len);
 		length = -ERANGE;
 		goto out3;
 	}
@@ -1760,7 +1761,7 @@ static int sel_fill_super(struct super_block * sb, void * data, int silent)
 out:
 	return ret;
 err:
-	printk(KERN_ERR "%s: failed while creating inodes\n", __FUNCTION__);
+	printk(KERN_ERR "%s: failed while creating inodes\n", __func__);
 	goto out;
 }
 
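For context on the policycap_names hunk above: each string in that array backs a file under the selinuxfs policy_capabilities directory, so userspace can test whether the loaded policy declares a capability by reading the file. A hedged sketch, assuming the classic /selinux mount point (the path may differ by distribution):

#include <stdio.h>

/* Read a policy capability flag ("0" or "1") from selinuxfs.
 * The path below assumes the historical /selinux mount point. */
int main(void)
{
	FILE *f = fopen("/selinux/policy_capabilities/open_perms", "r");
	int c;

	if (!f) {
		perror("open_perms");
		return 1;
	}
	c = fgetc(f);
	printf("open_perms capability: %s\n", c == '1' ? "enabled" : "disabled");
	fclose(f);
	return 0;
}
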
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index cd10e27fc9e6..916e73a18bc5 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -280,8 +280,8 @@ int avtab_alloc(struct avtab *h, u32 nrules)
 	h->nel = 0;
 	h->nslot = nslot;
 	h->mask = mask;
-	printk(KERN_DEBUG "SELinux:%d avtab hash slots allocated. "
-	       "Num of rules:%d\n", h->nslot, nrules);
+	printk(KERN_DEBUG "SELinux: %d avtab hash slots, %d rules.\n",
+	       h->nslot, nrules);
 	return 0;
 }
 
@@ -345,18 +345,18 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
 	if (vers < POLICYDB_VERSION_AVTAB) {
 		rc = next_entry(buf32, fp, sizeof(u32));
 		if (rc < 0) {
-			printk(KERN_ERR "security: avtab: truncated entry\n");
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
 			return -1;
 		}
 		items2 = le32_to_cpu(buf32[0]);
 		if (items2 > ARRAY_SIZE(buf32)) {
-			printk(KERN_ERR "security: avtab: entry overflow\n");
+			printk(KERN_ERR "SELinux: avtab: entry overflow\n");
 			return -1;
 
 		}
 		rc = next_entry(buf32, fp, sizeof(u32)*items2);
 		if (rc < 0) {
-			printk(KERN_ERR "security: avtab: truncated entry\n");
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
 			return -1;
 		}
 		items = 0;
@@ -364,19 +364,19 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
 		val = le32_to_cpu(buf32[items++]);
 		key.source_type = (u16)val;
 		if (key.source_type != val) {
-			printk("security: avtab: truncated source type\n");
+			printk("SELinux: avtab: truncated source type\n");
 			return -1;
 		}
 		val = le32_to_cpu(buf32[items++]);
 		key.target_type = (u16)val;
 		if (key.target_type != val) {
-			printk("security: avtab: truncated target type\n");
+			printk("SELinux: avtab: truncated target type\n");
 			return -1;
 		}
 		val = le32_to_cpu(buf32[items++]);
 		key.target_class = (u16)val;
 		if (key.target_class != val) {
-			printk("security: avtab: truncated target class\n");
+			printk("SELinux: avtab: truncated target class\n");
 			return -1;
 		}
 
@@ -384,12 +384,12 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
 		enabled = (val & AVTAB_ENABLED_OLD) ? AVTAB_ENABLED : 0;
 
 		if (!(val & (AVTAB_AV | AVTAB_TYPE))) {
-			printk("security: avtab: null entry\n");
+			printk("SELinux: avtab: null entry\n");
 			return -1;
 		}
 		if ((val & AVTAB_AV) &&
 		    (val & AVTAB_TYPE)) {
-			printk("security: avtab: entry has both access vectors and types\n");
+			printk("SELinux: avtab: entry has both access vectors and types\n");
 			return -1;
 		}
 
@@ -403,7 +403,7 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
 		}
 
 		if (items != items2) {
-			printk("security: avtab: entry only had %d items, expected %d\n", items2, items);
+			printk("SELinux: avtab: entry only had %d items, expected %d\n", items2, items);
 			return -1;
 		}
 		return 0;
@@ -411,7 +411,7 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
 
 	rc = next_entry(buf16, fp, sizeof(u16)*4);
 	if (rc < 0) {
-		printk("security: avtab: truncated entry\n");
+		printk("SELinux: avtab: truncated entry\n");
 		return -1;
 	}
 
@@ -424,7 +424,7 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
 	if (!policydb_type_isvalid(pol, key.source_type) ||
 	    !policydb_type_isvalid(pol, key.target_type) ||
 	    !policydb_class_isvalid(pol, key.target_class)) {
-		printk(KERN_WARNING "security: avtab: invalid type or class\n");
+		printk(KERN_WARNING "SELinux: avtab: invalid type or class\n");
 		return -1;
 	}
 
@@ -435,19 +435,19 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
 	}
 	if (!set || set > 1) {
 		printk(KERN_WARNING
-		       "security: avtab: more than one specifier\n");
+		       "SELinux: avtab: more than one specifier\n");
 		return -1;
 	}
 
 	rc = next_entry(buf32, fp, sizeof(u32));
 	if (rc < 0) {
-		printk("security: avtab: truncated entry\n");
+		printk("SELinux: avtab: truncated entry\n");
 		return -1;
 	}
 	datum.data = le32_to_cpu(*buf32);
 	if ((key.specified & AVTAB_TYPE) &&
 	    !policydb_type_isvalid(pol, datum.data)) {
-		printk(KERN_WARNING "security: avtab: invalid type\n");
+		printk(KERN_WARNING "SELinux: avtab: invalid type\n");
 		return -1;
 	}
 	return insertf(a, &key, &datum, p);
@@ -468,12 +468,12 @@ int avtab_read(struct avtab *a, void *fp, struct policydb *pol)
 
 	rc = next_entry(buf, fp, sizeof(u32));
 	if (rc < 0) {
-		printk(KERN_ERR "security: avtab: truncated table\n");
+		printk(KERN_ERR "SELinux: avtab: truncated table\n");
 		goto bad;
 	}
 	nel = le32_to_cpu(buf[0]);
 	if (!nel) {
-		printk(KERN_ERR "security: avtab: table is empty\n");
+		printk(KERN_ERR "SELinux: avtab: table is empty\n");
 		rc = -EINVAL;
 		goto bad;
 	}
@@ -486,9 +486,9 @@ int avtab_read(struct avtab *a, void *fp, struct policydb *pol)
 		rc = avtab_read_item(a, fp, pol, avtab_insertf, NULL);
 		if (rc) {
 			if (rc == -ENOMEM)
-				printk(KERN_ERR "security: avtab: out of memory\n");
+				printk(KERN_ERR "SELinux: avtab: out of memory\n");
 			else if (rc == -EEXIST)
-				printk(KERN_ERR "security: avtab: duplicate entry\n");
+				printk(KERN_ERR "SELinux: avtab: duplicate entry\n");
 			else
 				rc = -EINVAL;
 			goto bad;
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index 50ad85d4b77c..a996cf1d378a 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -96,7 +96,7 @@ int evaluate_cond_node(struct policydb *p, struct cond_node *node)
 	if (new_state != node->cur_state) {
 		node->cur_state = new_state;
 		if (new_state == -1)
-			printk(KERN_ERR "security: expression result was undefined - disabling all rules.\n");
+			printk(KERN_ERR "SELinux: expression result was undefined - disabling all rules.\n");
 		/* turn the rules on or off */
 		for (cur = node->true_list; cur != NULL; cur = cur->next) {
 			if (new_state <= 0) {
@@ -276,7 +276,7 @@ static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum
 	 */
 	if (k->specified & AVTAB_TYPE) {
 		if (avtab_search(&p->te_avtab, k)) {
-			printk("security: type rule already exists outside of a conditional.");
+			printk("SELinux: type rule already exists outside of a conditional.");
 			goto err;
 		}
 		/*
@@ -291,7 +291,7 @@ static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum
 		node_ptr = avtab_search_node(&p->te_cond_avtab, k);
 		if (node_ptr) {
 			if (avtab_search_node_next(node_ptr, k->specified)) {
-				printk("security: too many conflicting type rules.");
+				printk("SELinux: too many conflicting type rules.");
 				goto err;
 			}
 			found = 0;
@@ -302,13 +302,13 @@ static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum
 				}
 			}
 			if (!found) {
-				printk("security: conflicting type rules.\n");
+				printk("SELinux: conflicting type rules.\n");
 				goto err;
 			}
 		}
 	} else {
 		if (avtab_search(&p->te_cond_avtab, k)) {
-			printk("security: conflicting type rules when adding type rule for true.\n");
+			printk("SELinux: conflicting type rules when adding type rule for true.\n");
 			goto err;
 		}
 	}
@@ -316,7 +316,7 @@ static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum
 
 	node_ptr = avtab_insert_nonunique(&p->te_cond_avtab, k, d);
 	if (!node_ptr) {
-		printk("security: could not insert rule.");
+		printk("SELinux: could not insert rule.");
 		goto err;
 	}
 
@@ -376,12 +376,12 @@ static int cond_read_av_list(struct policydb *p, void *fp, struct cond_av_list *
 static int expr_isvalid(struct policydb *p, struct cond_expr *expr)
 {
 	if (expr->expr_type <= 0 || expr->expr_type > COND_LAST) {
-		printk("security: conditional expressions uses unknown operator.\n");
+		printk("SELinux: conditional expressions uses unknown operator.\n");
 		return 0;
 	}
 
 	if (expr->bool > p->p_bools.nprim) {
-		printk("security: conditional expressions uses unknown bool.\n");
+		printk("SELinux: conditional expressions uses unknown bool.\n");
 		return 0;
 	}
 	return 1;
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 920b5e36a1af..e499af474b35 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -364,7 +364,7 @@ int ebitmap_read(struct ebitmap *e, void *fp)
 	count = le32_to_cpu(buf[2]);
 
 	if (mapunit != sizeof(u64) * 8) {
-		printk(KERN_ERR "security: ebitmap: map size %u does not "
+		printk(KERN_ERR "SELinux: ebitmap: map size %u does not "
 		       "match my size %Zd (high bit was %d)\n",
 		       mapunit, sizeof(u64) * 8, e->highbit);
 		goto bad;
@@ -382,19 +382,19 @@ int ebitmap_read(struct ebitmap *e, void *fp)
 	for (i = 0; i < count; i++) {
 		rc = next_entry(&startbit, fp, sizeof(u32));
 		if (rc < 0) {
-			printk(KERN_ERR "security: ebitmap: truncated map\n");
+			printk(KERN_ERR "SELinux: ebitmap: truncated map\n");
 			goto bad;
 		}
 		startbit = le32_to_cpu(startbit);
 
 		if (startbit & (mapunit - 1)) {
-			printk(KERN_ERR "security: ebitmap start bit (%d) is "
+			printk(KERN_ERR "SELinux: ebitmap start bit (%d) is "
 			       "not a multiple of the map unit size (%u)\n",
 			       startbit, mapunit);
 			goto bad;
 		}
 		if (startbit > e->highbit - mapunit) {
-			printk(KERN_ERR "security: ebitmap start bit (%d) is "
+			printk(KERN_ERR "SELinux: ebitmap start bit (%d) is "
 			       "beyond the end of the bitmap (%u)\n",
 			       startbit, (e->highbit - mapunit));
 			goto bad;
@@ -405,7 +405,7 @@ int ebitmap_read(struct ebitmap *e, void *fp)
 			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
 			if (!tmp) {
 				printk(KERN_ERR
-				       "security: ebitmap: out of memory\n");
+				       "SELinux: ebitmap: out of memory\n");
 				rc = -ENOMEM;
 				goto bad;
 			}
@@ -418,7 +418,7 @@ int ebitmap_read(struct ebitmap *e, void *fp)
 			}
 			n = tmp;
 		} else if (startbit <= n->startbit) {
-			printk(KERN_ERR "security: ebitmap: start bit %d"
+			printk(KERN_ERR "SELinux: ebitmap: start bit %d"
 			       " comes after start bit %d\n",
 			       startbit, n->startbit);
 			goto bad;
@@ -426,7 +426,7 @@ int ebitmap_read(struct ebitmap *e, void *fp)
 
 		rc = next_entry(&map, fp, sizeof(u64));
 		if (rc < 0) {
-			printk(KERN_ERR "security: ebitmap: truncated map\n");
+			printk(KERN_ERR "SELinux: ebitmap: truncated map\n");
 			goto bad;
 		}
 		map = le64_to_cpu(map);
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index bd7d6a00342d..6bdb0ff6a927 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -111,6 +111,11 @@ static struct policydb_compat_info policydb_compat[] = {
 		.version = POLICYDB_VERSION_POLCAP,
 		.sym_num = SYM_NUM,
 		.ocon_num = OCON_NUM,
+	},
+	{
+		.version = POLICYDB_VERSION_PERMISSIVE,
+		.sym_num = SYM_NUM,
+		.ocon_num = OCON_NUM,
 	}
 };
 
@@ -194,6 +199,7 @@ static int policydb_init(struct policydb *p)
 		goto out_free_symtab;
 
 	ebitmap_init(&p->policycaps);
+	ebitmap_init(&p->permissive_map);
 
 out:
 	return rc;
@@ -401,14 +407,14 @@ static int policydb_index_others(struct policydb *p)
 {
 	int i, rc = 0;
 
-	printk(KERN_DEBUG "security: %d users, %d roles, %d types, %d bools",
+	printk(KERN_DEBUG "SELinux: %d users, %d roles, %d types, %d bools",
 	       p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, p->p_bools.nprim);
 	if (selinux_mls_enabled)
 		printk(", %d sens, %d cats", p->p_levels.nprim,
 		       p->p_cats.nprim);
 	printk("\n");
 
-	printk(KERN_DEBUG "security: %d classes, %d rules\n",
+	printk(KERN_DEBUG "SELinux: %d classes, %d rules\n",
 	       p->p_classes.nprim, p->te_avtab.nel);
 
 #ifdef DEBUG_HASHES
@@ -687,6 +693,7 @@ void policydb_destroy(struct policydb *p)
 	kfree(p->type_attr_map);
 	kfree(p->undefined_perms);
 	ebitmap_destroy(&p->policycaps);
+	ebitmap_destroy(&p->permissive_map);
 
 	return;
 }
@@ -702,20 +709,20 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
 
 	rc = sidtab_init(s);
 	if (rc) {
-		printk(KERN_ERR "security: out of memory on SID table init\n");
+		printk(KERN_ERR "SELinux: out of memory on SID table init\n");
 		goto out;
 	}
 
 	head = p->ocontexts[OCON_ISID];
 	for (c = head; c; c = c->next) {
 		if (!c->context[0].user) {
-			printk(KERN_ERR "security: SID %s was never "
+			printk(KERN_ERR "SELinux: SID %s was never "
 			       "defined.\n", c->u.name);
 			rc = -EINVAL;
 			goto out;
 		}
 		if (sidtab_insert(s, c->sid[0], &c->context[0])) {
-			printk(KERN_ERR "security: unable to load initial "
+			printk(KERN_ERR "SELinux: unable to load initial "
 			       "SID %s.\n", c->u.name);
 			rc = -EINVAL;
 			goto out;
@@ -809,13 +816,13 @@ static int mls_read_range_helper(struct mls_range *r, void *fp)
 
 	items = le32_to_cpu(buf[0]);
 	if (items > ARRAY_SIZE(buf)) {
-		printk(KERN_ERR "security: mls: range overflow\n");
+		printk(KERN_ERR "SELinux: mls: range overflow\n");
 		rc = -EINVAL;
 		goto out;
 	}
 	rc = next_entry(buf, fp, sizeof(u32) * items);
 	if (rc < 0) {
-		printk(KERN_ERR "security: mls: truncated range\n");
+		printk(KERN_ERR "SELinux: mls: truncated range\n");
 		goto out;
 	}
 	r->level[0].sens = le32_to_cpu(buf[0]);
@@ -826,21 +833,21 @@ static int mls_read_range_helper(struct mls_range *r, void *fp)
 
 	rc = ebitmap_read(&r->level[0].cat, fp);
 	if (rc) {
-		printk(KERN_ERR "security: mls: error reading low "
+		printk(KERN_ERR "SELinux: mls: error reading low "
 		       "categories\n");
 		goto out;
 	}
 	if (items > 1) {
 		rc = ebitmap_read(&r->level[1].cat, fp);
 		if (rc) {
-			printk(KERN_ERR "security: mls: error reading high "
+			printk(KERN_ERR "SELinux: mls: error reading high "
 			       "categories\n");
 			goto bad_high;
 		}
 	} else {
 		rc = ebitmap_cpy(&r->level[1].cat, &r->level[0].cat);
 		if (rc) {
-			printk(KERN_ERR "security: mls: out of memory\n");
+			printk(KERN_ERR "SELinux: mls: out of memory\n");
 			goto bad_high;
 		}
 	}
@@ -866,7 +873,7 @@ static int context_read_and_validate(struct context *c,
 
 	rc = next_entry(buf, fp, sizeof buf);
 	if (rc < 0) {
-		printk(KERN_ERR "security: context truncated\n");
+		printk(KERN_ERR "SELinux: context truncated\n");
 		goto out;
 	}
 	c->user = le32_to_cpu(buf[0]);
@@ -874,7 +881,7 @@ static int context_read_and_validate(struct context *c,
 	c->type = le32_to_cpu(buf[2]);
 	if (p->policyvers >= POLICYDB_VERSION_MLS) {
 		if (mls_read_range_helper(&c->range, fp)) {
-			printk(KERN_ERR "security: error reading MLS range of "
+			printk(KERN_ERR "SELinux: error reading MLS range of "
 			       "context\n");
 			rc = -EINVAL;
 			goto out;
@@ -882,7 +889,7 @@ static int context_read_and_validate(struct context *c,
 	}
 
 	if (!policydb_context_isvalid(p, c)) {
-		printk(KERN_ERR "security: invalid security context\n");
+		printk(KERN_ERR "SELinux: invalid security context\n");
 		context_destroy(c);
 		rc = -EINVAL;
 	}
@@ -1128,7 +1135,7 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp)
 		cladatum->comdatum = hashtab_search(p->p_commons.table,
 						    cladatum->comkey);
 		if (!cladatum->comdatum) {
-			printk(KERN_ERR "security: unknown common %s\n",
+			printk(KERN_ERR "SELinux: unknown common %s\n",
 			       cladatum->comkey);
 			rc = -EINVAL;
 			goto bad;
@@ -1283,13 +1290,13 @@ static int mls_read_level(struct mls_level *lp, void *fp)
 
 	rc = next_entry(buf, fp, sizeof buf);
 	if (rc < 0) {
-		printk(KERN_ERR "security: mls: truncated level\n");
+		printk(KERN_ERR "SELinux: mls: truncated level\n");
 		goto bad;
 	}
 	lp->sens = le32_to_cpu(buf[0]);
 
 	if (ebitmap_read(&lp->cat, fp)) {
-		printk(KERN_ERR "security: mls: error reading level "
+		printk(KERN_ERR "SELinux: mls: error reading level "
 		       "categories\n");
 		goto bad;
 	}
@@ -1491,7 +1498,7 @@ int policydb_read(struct policydb *p, void *fp)
 		goto bad;
 
 	if (le32_to_cpu(buf[0]) != POLICYDB_MAGIC) {
-		printk(KERN_ERR "security: policydb magic number 0x%x does "
+		printk(KERN_ERR "SELinux: policydb magic number 0x%x does "
 		       "not match expected magic number 0x%x\n",
 		       le32_to_cpu(buf[0]), POLICYDB_MAGIC);
 		goto bad;
@@ -1499,27 +1506,27 @@ int policydb_read(struct policydb *p, void *fp)
 
 	len = le32_to_cpu(buf[1]);
 	if (len != strlen(POLICYDB_STRING)) {
-		printk(KERN_ERR "security: policydb string length %d does not "
+		printk(KERN_ERR "SELinux: policydb string length %d does not "
 		       "match expected length %Zu\n",
 		       len, strlen(POLICYDB_STRING));
 		goto bad;
 	}
 	policydb_str = kmalloc(len + 1,GFP_KERNEL);
 	if (!policydb_str) {
-		printk(KERN_ERR "security: unable to allocate memory for policydb "
+		printk(KERN_ERR "SELinux: unable to allocate memory for policydb "
 		       "string of length %d\n", len);
 		rc = -ENOMEM;
 		goto bad;
 	}
 	rc = next_entry(policydb_str, fp, len);
 	if (rc < 0) {
-		printk(KERN_ERR "security: truncated policydb string identifier\n");
+		printk(KERN_ERR "SELinux: truncated policydb string identifier\n");
 		kfree(policydb_str);
 		goto bad;
 	}
 	policydb_str[len] = 0;
 	if (strcmp(policydb_str, POLICYDB_STRING)) {
-		printk(KERN_ERR "security: policydb string %s does not match "
+		printk(KERN_ERR "SELinux: policydb string %s does not match "
 		       "my string %s\n", policydb_str, POLICYDB_STRING);
 		kfree(policydb_str);
 		goto bad;
@@ -1536,7 +1543,7 @@ int policydb_read(struct policydb *p, void *fp)
 	p->policyvers = le32_to_cpu(buf[0]);
 	if (p->policyvers < POLICYDB_VERSION_MIN ||
 	    p->policyvers > POLICYDB_VERSION_MAX) {
-		printk(KERN_ERR "security: policydb version %d does not match "
+		printk(KERN_ERR "SELinux: policydb version %d does not match "
 		       "my version range %d-%d\n",
 		       le32_to_cpu(buf[0]), POLICYDB_VERSION_MIN, POLICYDB_VERSION_MAX);
 		goto bad;
@@ -1570,16 +1577,20 @@ int policydb_read(struct policydb *p, void *fp)
 	    ebitmap_read(&p->policycaps, fp) != 0)
 		goto bad;
 
+	if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE &&
+	    ebitmap_read(&p->permissive_map, fp) != 0)
+		goto bad;
+
 	info = policydb_lookup_compat(p->policyvers);
 	if (!info) {
-		printk(KERN_ERR "security: unable to find policy compat info "
+		printk(KERN_ERR "SELinux: unable to find policy compat info "
 		       "for version %d\n", p->policyvers);
 		goto bad;
 	}
 
 	if (le32_to_cpu(buf[2]) != info->sym_num ||
 	    le32_to_cpu(buf[3]) != info->ocon_num) {
-		printk(KERN_ERR "security: policydb table sizes (%d,%d) do "
+		printk(KERN_ERR "SELinux: policydb table sizes (%d,%d) do "
 		       "not match mine (%d,%d)\n", le32_to_cpu(buf[2]),
 		       le32_to_cpu(buf[3]),
 		       info->sym_num, info->ocon_num);
@@ -1823,7 +1834,7 @@ int policydb_read(struct policydb *p, void *fp)
 		for (genfs_p = NULL, genfs = p->genfs; genfs;
 		     genfs_p = genfs, genfs = genfs->next) {
 			if (strcmp(newgenfs->fstype, genfs->fstype) == 0) {
-				printk(KERN_ERR "security: dup genfs "
+				printk(KERN_ERR "SELinux: dup genfs "
 				       "fstype %s\n", newgenfs->fstype);
 				kfree(newgenfs->fstype);
 				kfree(newgenfs);
@@ -1873,7 +1884,7 @@ int policydb_read(struct policydb *p, void *fp)
 				if (!strcmp(newc->u.name, c->u.name) &&
 				    (!c->v.sclass || !newc->v.sclass ||
 				     newc->v.sclass == c->v.sclass)) {
-					printk(KERN_ERR "security: dup genfs "
+					printk(KERN_ERR "SELinux: dup genfs "
 					       "entry (%s,%s)\n",
 					       newgenfs->fstype, c->u.name);
 					goto bad_newc;
@@ -1931,7 +1942,7 @@ int policydb_read(struct policydb *p, void *fp)
 		if (rc)
 			goto bad;
 		if (!mls_range_isvalid(p, &rt->target_range)) {
-			printk(KERN_WARNING "security: rangetrans: invalid range\n");
+			printk(KERN_WARNING "SELinux: rangetrans: invalid range\n");
 			goto bad;
 		}
 		lrt = rt;
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index c4ce996e202c..ba593a3da877 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -243,6 +243,8 @@ struct policydb {
 
 	struct ebitmap policycaps;
 
+	struct ebitmap permissive_map;
+
 	unsigned int policyvers;
 
 	unsigned int reject_unknown : 1;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 3f2bad28ee7b..d75050819b06 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -40,6 +40,7 @@
 #include <linux/sched.h>
 #include <linux/audit.h>
 #include <linux/mutex.h>
+#include <linux/selinux.h>
 #include <net/netlabel.h>
 
 #include "flask.h"
@@ -61,6 +62,7 @@ extern void selnl_notify_policyload(u32 seqno);
 unsigned int policydb_loaded_version;
 
 int selinux_policycap_netpeer;
+int selinux_policycap_openperm;
 
 /*
  * This is declared in avc.c
@@ -412,10 +414,35 @@ static int context_struct_compute_av(struct context *scontext,
 		return 0;
 
 inval_class:
-	printk(KERN_ERR "%s: unrecognized class %d\n", __FUNCTION__, tclass);
+	printk(KERN_ERR "%s: unrecognized class %d\n", __func__, tclass);
 	return -EINVAL;
 }
 
+/*
+ * Given a sid find if the type has the permissive flag set
+ */
+int security_permissive_sid(u32 sid)
+{
+	struct context *context;
+	u32 type;
+	int rc;
+
+	POLICY_RDLOCK;
+
+	context = sidtab_search(&sidtab, sid);
+	BUG_ON(!context);
+
+	type = context->type;
+	/*
+	 * we are intentionally using type here, not type-1, the 0th bit may
+	 * someday indicate that we are globally setting permissive in policy.
+	 */
+	rc = ebitmap_get_bit(&policydb.permissive_map, type);
+
+	POLICY_RDUNLOCK;
+	return rc;
+}
+
 static int security_validtrans_handle_fail(struct context *ocontext,
 					   struct context *ncontext,
 					   struct context *tcontext,
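The comment inside the new security_permissive_sid() deserves a gloss: type values are 1-based, so indexing permissive_map with type rather than type - 1 leaves bit 0 permanently unused, reserved for a possible future global-permissive flag. A toy illustration of that layout (userspace sketch, not the kernel's ebitmap API):

#include <stdio.h>

int main(void)
{
	unsigned long permissive_map = 0;
	unsigned int type = 3;		/* type values start at 1; bit 0 stays reserved */

	permissive_map |= 1UL << type;	/* mark type 3 permissive */

	printf("type 3 permissive: %lu\n", (permissive_map >> 3) & 1);
	printf("bit 0 (reserved):  %lu\n", permissive_map & 1);
	return 0;
}
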
@@ -1096,7 +1123,7 @@ static int validate_classes(struct policydb *p)
 			continue;
 		if (i > p->p_classes.nprim) {
 			printk(KERN_INFO
-			       "security: class %s not defined in policy\n",
+			       "SELinux: class %s not defined in policy\n",
 			       def_class);
 			if (p->reject_unknown)
 				return -EINVAL;
@@ -1107,7 +1134,7 @@ static int validate_classes(struct policydb *p)
 		pol_class = p->p_class_val_to_name[i-1];
 		if (strcmp(pol_class, def_class)) {
 			printk(KERN_ERR
-			       "security: class %d is incorrect, found %s but should be %s\n",
+			       "SELinux: class %d is incorrect, found %s but should be %s\n",
 			       i, pol_class, def_class);
 			return -EINVAL;
 		}
@@ -1125,7 +1152,7 @@ static int validate_classes(struct policydb *p)
 		nprim = 1 << (perms->nprim - 1);
 		if (perm_val > nprim) {
 			printk(KERN_INFO
-			       "security: permission %s in class %s not defined in policy\n",
+			       "SELinux: permission %s in class %s not defined in policy\n",
 			       def_perm, pol_class);
 			if (p->reject_unknown)
 				return -EINVAL;
@@ -1136,14 +1163,14 @@ static int validate_classes(struct policydb *p)
 		perdatum = hashtab_search(perms->table, def_perm);
 		if (perdatum == NULL) {
 			printk(KERN_ERR
-			       "security: permission %s in class %s not found in policy, bad policy\n",
+			       "SELinux: permission %s in class %s not found in policy, bad policy\n",
 			       def_perm, pol_class);
 			return -EINVAL;
 		}
 		pol_val = 1 << (perdatum->value - 1);
 		if (pol_val != perm_val) {
 			printk(KERN_ERR
-			       "security: permission %s in class %s has incorrect value\n",
+			       "SELinux: permission %s in class %s has incorrect value\n",
 			       def_perm, pol_class);
 			return -EINVAL;
 		}
@@ -1157,7 +1184,7 @@ static int validate_classes(struct policydb *p)
 		BUG_ON(!cladatum);
 		if (!cladatum->comdatum) {
 			printk(KERN_ERR
-			       "security: class %s should have an inherits clause but does not\n",
+			       "SELinux: class %s should have an inherits clause but does not\n",
 			       pol_class);
 			return -EINVAL;
 		}
@@ -1172,7 +1199,7 @@ static int validate_classes(struct policydb *p)
 			def_perm = kdefs->av_inherit[i].common_pts[j];
 			if (j >= perms->nprim) {
 				printk(KERN_INFO
-				       "security: permission %s in class %s not defined in policy\n",
+				       "SELinux: permission %s in class %s not defined in policy\n",
 				       def_perm, pol_class);
 				if (p->reject_unknown)
 					return -EINVAL;
@@ -1183,13 +1210,13 @@ static int validate_classes(struct policydb *p)
 			perdatum = hashtab_search(perms->table, def_perm);
 			if (perdatum == NULL) {
 				printk(KERN_ERR
-				       "security: permission %s in class %s not found in policy, bad policy\n",
+				       "SELinux: permission %s in class %s not found in policy, bad policy\n",
 				       def_perm, pol_class);
 				return -EINVAL;
 			}
 			if (perdatum->value != j + 1) {
 				printk(KERN_ERR
-				       "security: permission %s in class %s has incorrect value\n",
+				       "SELinux: permission %s in class %s has incorrect value\n",
 				       def_perm, pol_class);
 				return -EINVAL;
 			}
@@ -1219,7 +1246,7 @@ static inline int convert_context_handle_invalid_context(struct context *context
 		u32 len;
 
 		context_struct_to_string(context, &s, &len);
-		printk(KERN_ERR "security: context %s is invalid\n", s);
+		printk(KERN_ERR "SELinux: context %s is invalid\n", s);
 		kfree(s);
 	}
 	return rc;
@@ -1299,7 +1326,7 @@ out:
 bad:
 	context_struct_to_string(&oldc, &s, &len);
 	context_destroy(&oldc);
-	printk(KERN_ERR "security: invalidating context %s\n", s);
+	printk(KERN_ERR "SELinux: invalidating context %s\n", s);
 	kfree(s);
 	goto out;
 }
@@ -1308,6 +1335,8 @@ static void security_load_policycaps(void)
 {
 	selinux_policycap_netpeer = ebitmap_get_bit(&policydb.policycaps,
 						    POLICYDB_CAPABILITY_NETPEER);
+	selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps,
+						     POLICYDB_CAPABILITY_OPENPERM);
 }
 
 extern void selinux_complete_init(void);
@@ -1350,7 +1379,7 @@ int security_load_policy(void *data, size_t len)
 	/* Verify that the kernel defined classes are correct. */
 	if (validate_classes(&policydb)) {
 		printk(KERN_ERR
-		       "security: the definition of a class is incorrect\n");
+		       "SELinux: the definition of a class is incorrect\n");
 		LOAD_UNLOCK;
 		sidtab_destroy(&sidtab);
 		policydb_destroy(&policydb);
@@ -1384,14 +1413,14 @@ int security_load_policy(void *data, size_t len)
 	/* Verify that the kernel defined classes are correct. */
 	if (validate_classes(&newpolicydb)) {
 		printk(KERN_ERR
-		       "security: the definition of a class is incorrect\n");
+		       "SELinux: the definition of a class is incorrect\n");
 		rc = -EINVAL;
 		goto err;
 	}
 
 	rc = security_preserve_bools(&newpolicydb);
 	if (rc) {
-		printk(KERN_ERR "security: unable to preserve booleans\n");
+		printk(KERN_ERR "SELinux: unable to preserve booleans\n");
 		goto err;
 	}
 
@@ -1443,17 +1472,11 @@ err:
 
 /**
  * security_port_sid - Obtain the SID for a port.
- * @domain: communication domain aka address family
- * @type: socket type
  * @protocol: protocol number
 * @port: port number
 * @out_sid: security identifier
 */
-int security_port_sid(u16 domain,
-		      u16 type,
-		      u8 protocol,
-		      u16 port,
-		      u32 *out_sid)
+int security_port_sid(u8 protocol, u16 port, u32 *out_sid)
 {
 	struct ocontext *c;
 	int rc = 0;
@@ -2203,7 +2226,7 @@ int security_get_permissions(char *class, char ***perms, int *nperms)
 	match = hashtab_search(policydb.p_classes.table, class);
 	if (!match) {
 		printk(KERN_ERR "%s: unrecognized class %s\n",
-		       __FUNCTION__, class);
+		       __func__, class);
 		rc = -EINVAL;
 		goto out;
 	}